commit 093b9228
Author:    Chris Wilson <chris@chris-wilson.co.uk>
Committer: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

drm/i915: Split i915_active.mutex into an irq-safe spinlock for the rbtree

As we want to be able to run inside atomic context for retiring the
i915_active, and we are no longer allowed to abuse mutex_trylock, split
the tree management portion of i915_active.mutex into an irq-safe
spinlock.

References: a0855d24 ("locking/mutex: Complain upon mutex API misuse in IRQ contexts")
References: https://bugs.freedesktop.org/show_bug.cgi?id=111626
Fixes: 274cbf20 ("drm/i915: Push the i915_active.retire into a worker")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191114172535.1116-1-chris@chris-wilson.co.uk
(cherry picked from commit c9ad602f)
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Parent: fa039b93
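The shape of the fix, independent of the driver specifics: guard only the tree bookkeeping with a small irq-safe spinlock, detach the nodes to be destroyed while holding it, and free them only after dropping it, so the lock is never held across work that can sleep. Below is a minimal userspace sketch of that detach-under-lock, free-outside-lock discipline; a linked list and a pthread spinlock stand in for the rbtree and ref->tree_lock, all names are illustrative, and userspace has no interrupts to disable, so the irq-safety half of spin_lock_irq() has no analogue here.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* A linked list stands in for the driver's rbtree of active nodes. */
struct node {
        struct node *next;
        int payload;
};

static pthread_spinlock_t tree_lock;    /* guards 'tree' and nothing else */
static struct node *tree;

static void retire_all(void)
{
        struct node *it, *next;

        /* Detach the whole structure while the spinlock is held... */
        pthread_spin_lock(&tree_lock);
        it = tree;
        tree = NULL;
        pthread_spin_unlock(&tree_lock);

        /* ...then free outside it: the lock is never held across work
         * that could sleep, so callers may take it in atomic context. */
        while (it) {
                next = it->next;
                free(it);
                it = next;
        }
}

int main(void)
{
        pthread_spin_init(&tree_lock, PTHREAD_PROCESS_PRIVATE);

        for (int i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));
                n->payload = i;
                pthread_spin_lock(&tree_lock);
                n->next = tree;
                tree = n;
                pthread_spin_unlock(&tree_lock);
        }

        retire_all();
        printf("tree emptied: %s\n", tree ? "no" : "yes");
        return 0;
}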
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -91,14 +91,15 @@ static void debug_active_init(struct i915_active *ref)
 
 static void debug_active_activate(struct i915_active *ref)
 {
-        lockdep_assert_held(&ref->mutex);
+        spin_lock_irq(&ref->tree_lock);
         if (!atomic_read(&ref->count)) /* before the first inc */
                 debug_object_activate(ref, &active_debug_desc);
+        spin_unlock_irq(&ref->tree_lock);
 }
 
 static void debug_active_deactivate(struct i915_active *ref)
 {
-        lockdep_assert_held(&ref->mutex);
+        lockdep_assert_held(&ref->tree_lock);
         if (!atomic_read(&ref->count)) /* after the last dec */
                 debug_object_deactivate(ref, &active_debug_desc);
 }
@@ -128,29 +129,22 @@ __active_retire(struct i915_active *ref)
 {
         struct active_node *it, *n;
         struct rb_root root;
-        bool retire = false;
+        unsigned long flags;
 
-        lockdep_assert_held(&ref->mutex);
         GEM_BUG_ON(i915_active_is_idle(ref));
 
         /* return the unused nodes to our slabcache -- flushing the allocator */
-        if (atomic_dec_and_test(&ref->count)) {
-                debug_active_deactivate(ref);
-                root = ref->tree;
-                ref->tree = RB_ROOT;
-                ref->cache = NULL;
-                retire = true;
-        }
-
-        mutex_unlock(&ref->mutex);
-        if (!retire)
+        if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
                 return;
 
         GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
-        rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
-                GEM_BUG_ON(i915_active_fence_isset(&it->base));
-                kmem_cache_free(global.slab_cache, it);
-        }
+        debug_active_deactivate(ref);
+
+        root = ref->tree;
+        ref->tree = RB_ROOT;
+        ref->cache = NULL;
+
+        spin_unlock_irqrestore(&ref->tree_lock, flags);
 
         /* After the final retire, the entire struct may be freed */
         if (ref->retire)
@@ -158,6 +152,11 @@ __active_retire(struct i915_active *ref)
 
         /* ... except if you wait on it, you must manage your own references! */
         wake_up_var(ref);
+
+        rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
+                GEM_BUG_ON(i915_active_fence_isset(&it->base));
+                kmem_cache_free(global.slab_cache, it);
+        }
 }
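The interesting call above is atomic_dec_and_lock_irqsave(): it takes the spinlock only when the reference count may actually drop to zero, so the common non-final decrement stays lock-free, and on success it returns with the lock held (the kernel variant additionally disables interrupts and saves the flags, which userspace cannot model). A minimal userspace analogue of the dec-and-lock half of that contract, with a pthread mutex standing in for the spinlock and all names illustrative:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Userspace analogue of the kernel's atomic_dec_and_lock(): returns
 * true with *lock held iff this call dropped *cnt to zero.  Callers
 * that are not the final reference never touch the lock at all.
 */
static bool dec_and_lock(atomic_int *cnt, pthread_mutex_t *lock)
{
        int old = atomic_load(cnt);

        /* Fast path: clearly not the last reference, decrement lock-free. */
        while (old > 1)
                if (atomic_compare_exchange_weak(cnt, &old, old - 1))
                        return false;

        /* Slow path: possibly the last reference.  Take the lock first so
         * the 1 -> 0 transition is serialized against everyone who reads
         * the structure the lock protects. */
        pthread_mutex_lock(lock);
        if (atomic_fetch_sub(cnt, 1) == 1)
                return true;            /* caller must unlock */
        pthread_mutex_unlock(lock);
        return false;
}

int main(void)
{
        pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
        atomic_int cnt = 2;

        printf("first put took lock: %d\n", dec_and_lock(&cnt, &lock));
        if (dec_and_lock(&cnt, &lock)) {        /* final put */
                printf("final put: tearing down under the lock\n");
                pthread_mutex_unlock(&lock);
        }
        return 0;
}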
 
 static void
@@ -169,7 +168,6 @@ active_work(struct work_struct *wrk)
         if (atomic_add_unless(&ref->count, -1, 1))
                 return;
 
-        mutex_lock(&ref->mutex);
         __active_retire(ref);
 }
 
@@ -180,9 +178,7 @@ active_retire(struct i915_active *ref)
         if (atomic_add_unless(&ref->count, -1, 1))
                 return;
 
-        /* If we are inside interrupt context (fence signaling), defer */
-        if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS ||
-            !mutex_trylock(&ref->mutex)) {
+        if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
                 queue_work(system_unbound_wq, &ref->work);
                 return;
         }
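With lock acquisition now legal in any context, the only remaining reason to bounce retirement to a worker is a retire callback that is allowed to sleep; the old "trylock or punt" dance disappears. A toy model of that simplified decision, with a plain thread standing in for the system workqueue and every name made up for illustration:

#include <pthread.h>
#include <stdio.h>

#define RETIRE_SLEEPS (1u << 0)

struct thing {
        unsigned int flags;
        const char *name;
};

static void *worker(void *arg)
{
        struct thing *t = arg;
        /* this context may sleep, like a kernel workqueue item */
        printf("%s retired from worker\n", t->name);
        return NULL;
}

/* Stand-in for active_retire() after the rework: defer only when the
 * retire hook may sleep; otherwise run inline, even in atomic context. */
static void retire(struct thing *t)
{
        if (t->flags & RETIRE_SLEEPS) {
                pthread_t tid;
                pthread_create(&tid, NULL, worker, t);
                pthread_join(tid, NULL); /* join only to keep the demo tidy */
                return;
        }
        printf("%s retired inline\n", t->name);
}

int main(void)
{
        retire(&(struct thing){ .flags = 0, .name = "spinlock-only" });
        retire(&(struct thing){ .flags = RETIRE_SLEEPS, .name = "sleepy" });
        return 0;
}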
@@ -227,7 +223,7 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl)
         if (!prealloc)
                 return NULL;
 
-        mutex_lock(&ref->mutex);
+        spin_lock_irq(&ref->tree_lock);
         GEM_BUG_ON(i915_active_is_idle(ref));
 
         parent = NULL;
@@ -257,7 +253,7 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl)
 
 out:
         ref->cache = node;
-        mutex_unlock(&ref->mutex);
+        spin_unlock_irq(&ref->tree_lock);
 
         BUILD_BUG_ON(offsetof(typeof(*node), base));
         return &node->base;
@@ -278,8 +274,10 @@ void __i915_active_init(struct i915_active *ref,
         if (bits & I915_ACTIVE_MAY_SLEEP)
                 ref->flags |= I915_ACTIVE_RETIRE_SLEEPS;
 
+        spin_lock_init(&ref->tree_lock);
         ref->tree = RB_ROOT;
         ref->cache = NULL;
+
         init_llist_head(&ref->preallocated_barriers);
         atomic_set(&ref->count, 0);
         __mutex_init(&ref->mutex, "i915_active", key);
@@ -510,7 +508,7 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
         if (RB_EMPTY_ROOT(&ref->tree))
                 return NULL;
 
-        mutex_lock(&ref->mutex);
+        spin_lock_irq(&ref->tree_lock);
         GEM_BUG_ON(i915_active_is_idle(ref));
 
         /*
@@ -575,7 +573,7 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
                         goto match;
         }
 
-        mutex_unlock(&ref->mutex);
+        spin_unlock_irq(&ref->tree_lock);
 
         return NULL;
 
@@ -583,7 +581,7 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
         rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
         if (p == &ref->cache->node)
                 ref->cache = NULL;
-        mutex_unlock(&ref->mutex);
+        spin_unlock_irq(&ref->tree_lock);
 
         return rb_entry(p, struct active_node, node);
 }
@@ -664,6 +662,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
 void i915_active_acquire_barrier(struct i915_active *ref)
 {
         struct llist_node *pos, *next;
+        unsigned long flags;
 
         GEM_BUG_ON(i915_active_is_idle(ref));
 
@@ -673,7 +672,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
          * populated by i915_request_add_active_barriers() to point to the
          * request that will eventually release them.
          */
-        mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
+        spin_lock_irqsave_nested(&ref->tree_lock, flags, SINGLE_DEPTH_NESTING);
         llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
                 struct active_node *node = barrier_from_ll(pos);
                 struct intel_engine_cs *engine = barrier_to_engine(node);
@@ -699,7 +698,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
                 llist_add(barrier_to_ll(node), &engine->barrier_tasks);
                 intel_engine_pm_put(engine);
         }
-        mutex_unlock(&ref->mutex);
+        spin_unlock_irqrestore(&ref->tree_lock, flags);
 }
 
 void i915_request_add_active_barriers(struct i915_request *rq)
--- a/drivers/gpu/drm/i915/i915_active_types.h
+++ b/drivers/gpu/drm/i915/i915_active_types.h
@@ -48,6 +48,7 @@ struct i915_active {
         atomic_t count;
         struct mutex mutex;
 
+        spinlock_t tree_lock;
         struct active_node *cache;
         struct rb_root tree;
 