author		Chris Wilson <chris@chris-wilson.co.uk>	2020-07-31 09:50:14 +0100
committer	Joonas Lahtinen <joonas.lahtinen@linux.intel.com>	2020-09-03 15:35:25 +0300
commit		0e69f4622e6aa5d1147d15e1d7ff4bba70496c8a
tree		7ba089e2c6e79c3af47e083811cf6ceae4dec2ad
parent		e17a6ce709c2a54ffae61c3a556a429365d5968d
drm/i915: Reduce locking around i915_active_acquire_preallocate_barrier()
As the conversion between an idle barrier and a full i915_active_fence is
already serialised by explicit memory barriers, we can relax the locking in
i915_active_acquire_preallocate_barrier(): the search for an idle barrier to
reuse needs only an RCU read lock to keep the fence valid, and the spinlock
is taken just for the update of the rbtree itself.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Thomas Hellström <thomas.hellstrom@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200731085015.32368-6-chris@chris-wilson.co.uk
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
 drivers/gpu/drm/i915/i915_active.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
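In outline, the patch narrows "lock around the whole lookup" down to "lock
only the tree update". The sketch below shows that shape using the kernel's
rbtree, RCU and spinlock APIs; node_cache, cached_node and claim_node() are
hypothetical names invented for illustration, and the explicit memory
barriers the commit message relies on for the idle-barrier conversion are
elided here.

/*
 * Minimal sketch of the locking pattern this patch adopts, not the
 * i915 code itself: walk an rbtree under rcu_read_lock() alone, and
 * take the spinlock only around the modification.
 */
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct node_cache {
	spinlock_t lock;	/* serialises updates to @tree */
	struct rb_root tree;	/* nodes freed only after an RCU grace period */
};

struct cached_node {
	struct rb_node rb;
	u64 key;
};

/*
 * Caller holds rcu_read_lock(), mirroring how the patch wraps the call
 * to reuse_idle_barrier(). As in the real code, some external
 * serialisation must ensure two threads cannot claim the same node.
 */
static struct cached_node *claim_node(struct node_cache *c, u64 key)
{
	struct rb_node *p = READ_ONCE(c->tree.rb_node);

	while (p) {
		struct cached_node *node =
			rb_entry(p, struct cached_node, rb);

		if (node->key == key) {
			/* Lock only for the update of the tree itself. */
			spin_lock_irq(&c->lock);
			rb_erase(p, &c->tree);
			spin_unlock_irq(&c->lock);
			return node;
		}

		/*
		 * The tree may be rebalanced under us; READ_ONCE()
		 * stops the compiler refetching a child pointer
		 * mid-walk. A concurrent rotation may still misdirect
		 * the search, which is benign if a miss merely means
		 * falling back to allocating a fresh node.
		 */
		if (node->key < key)
			p = READ_ONCE(p->rb_right);
		else
			p = READ_ONCE(p->rb_left);
	}

	return NULL;
}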
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 89c34a69a2ea..b0a6522be3d1 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -807,7 +807,6 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
 	if (RB_EMPTY_ROOT(&ref->tree))
 		return NULL;
 
-	spin_lock_irq(&ref->tree_lock);
 	GEM_BUG_ON(i915_active_is_idle(ref));
 
 	/*
@@ -833,9 +832,9 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
 
 		prev = p;
 		if (node->timeline < idx)
-			p = p->rb_right;
+			p = READ_ONCE(p->rb_right);
 		else
-			p = p->rb_left;
+			p = READ_ONCE(p->rb_left);
 	}
 
 	/*
@@ -872,11 +871,10 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
 			goto match;
 	}
 
-	spin_unlock_irq(&ref->tree_lock);
-
 	return NULL;
 
 match:
+	spin_lock_irq(&ref->tree_lock);
 	rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
 	if (p == &ref->cache->node)
 		WRITE_ONCE(ref->cache, NULL);
@@ -910,7 +908,9 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
 		struct llist_node *prev = first;
 		struct active_node *node;
 
+		rcu_read_lock();
 		node = reuse_idle_barrier(ref, idx);
+		rcu_read_unlock();
 		if (!node) {
 			node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
 			if (!node)
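Taken together: the lookup in reuse_idle_barrier() now runs with only
rcu_read_lock() held by the caller, READ_ONCE() guards the child-pointer
loads against a tree that may be rebalanced concurrently, and ref->tree_lock
is held just across the rb_erase() at the match: label. If the unlocked walk
is misdirected by a concurrent rotation, the lookup simply returns NULL and
the caller falls back to kmem_cache_alloc(), so correctness does not depend
on the walk finding every reusable node.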