diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a53f6b30b6956930930775b5b9b79c5ac2de05d4..6eff81fb939c95c1912711e5b21d8e8b162fd4e2 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3963,7 +3963,7 @@ __i915_request_irq_complete(struct drm_i915_gem_request *req)
 	 */
 	if (engine->irq_seqno_barrier &&
 	    rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh) == current &&
-	    cmpxchg_relaxed(&engine->breadcrumbs.irq_posted, 1, 0)) {
+	    test_and_clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted)) {
 		struct task_struct *tsk;
 
 		/* The ordering of irq_posted versus applying the barrier
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 6fefc34ef602e9ee758c980bb37e2b1d63e6746c..7e087c34426514f9dd91105e98f5597008f68387 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1033,7 +1033,7 @@ static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
 
 static void notify_ring(struct intel_engine_cs *engine)
 {
-	smp_store_mb(engine->breadcrumbs.irq_posted, true);
+	set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
 	if (intel_engine_wakeup(engine))
 		trace_i915_gem_request_notify(engine);
 }
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index c6fa77177615ebf3a396391012a9a8ee37ae2f72..6b24f2544b6b681d8df21de0410294376f7275c0 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -81,7 +81,7 @@ static void irq_enable(struct intel_engine_cs *engine)
 	 * we still need to force the barrier before reading the seqno,
 	 * just in case.
 	 */
-	engine->breadcrumbs.irq_posted = true;
+	set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
 
 	/* Caller disables interrupts */
 	spin_lock(&engine->i915->irq_lock);
@@ -95,8 +95,6 @@ static void irq_disable(struct intel_engine_cs *engine)
 	spin_lock(&engine->i915->irq_lock);
 	engine->irq_disable(engine);
 	spin_unlock(&engine->i915->irq_lock);
-
-	engine->breadcrumbs.irq_posted = false;
 }
 
 static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
@@ -257,7 +255,8 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
 		 * in case the seqno passed.
 		 */
 		__intel_breadcrumbs_enable_irq(b);
-		if (READ_ONCE(b->irq_posted))
+		if (test_bit(ENGINE_IRQ_BREADCRUMB,
+			     &engine->irq_posted))
 			wake_up_process(to_wait(next)->tsk);
 	}
 
@@ -610,7 +609,7 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
 	if (intel_engine_has_waiter(engine)) {
 		b->timeout = wait_timeout();
 		__intel_breadcrumbs_enable_irq(b);
-		if (READ_ONCE(b->irq_posted))
+		if (test_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted))
 			wake_up_process(b->first_wait->tsk);
 	} else {
 		/* sanitize the IMR and unmask any auxiliary interrupts */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index dbd32585f27a64f43576742ba6c9d22fb8555876..a9ea84ea3155b5bb2fed12173a0613b39225ad11 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -211,6 +211,9 @@ struct intel_engine_cs {
 
 	struct intel_render_state *render_state;
 
+	unsigned long irq_posted;
+#define ENGINE_IRQ_BREADCRUMB 0
+
 	/* Rather than have every client wait upon all user interrupts,
 	 * with the herd waking after every interrupt and each doing the
 	 * heavyweight seqno dance, we delegate the task (of being the
@@ -229,7 +232,6 @@ struct intel_engine_cs {
 	 */
 	struct intel_breadcrumbs {
 		struct task_struct __rcu *irq_seqno_bh; /* bh for interrupts */
-		bool irq_posted;
 
 		spinlock_t lock; /* protects the lists of requests; irqsafe */
 		struct rb_root waiters; /* sorted by retirement, priority */
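
Note on the pattern: the patch replaces a bool flag managed with explicit barrier primitives (smp_store_mb(), cmpxchg_relaxed()) by the kernel's atomic bitops on an unsigned long bitmask, moving irq_posted from intel_breadcrumbs up to intel_engine_cs and leaving room for further ENGINE_IRQ_* bits alongside ENGINE_IRQ_BREADCRUMB. Below is a minimal userspace sketch of the same producer/consumer idea, using C11 atomics to stand in for set_bit()/test_and_clear_bit()/test_bit(); the *_u helpers and the main() harness are illustrative only, not part of the patch or the kernel API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define ENGINE_IRQ_BREADCRUMB 0	/* bit index, as in the patch */

/* Stand-in for set_bit(): atomically OR the bit into the word. */
static void set_bit_u(int nr, atomic_ulong *addr)
{
	atomic_fetch_or(addr, 1UL << nr);
}

/* Stand-in for test_and_clear_bit(): clear the bit, return its old value. */
static bool test_and_clear_bit_u(int nr, atomic_ulong *addr)
{
	return atomic_fetch_and(addr, ~(1UL << nr)) & (1UL << nr);
}

/* Stand-in for test_bit(): non-destructive read of the bit. */
static bool test_bit_u(int nr, atomic_ulong *addr)
{
	return atomic_load(addr) & (1UL << nr);
}

int main(void)
{
	atomic_ulong irq_posted = 0;

	/* interrupt-handler side (cf. notify_ring): mark the breadcrumb posted */
	set_bit_u(ENGINE_IRQ_BREADCRUMB, &irq_posted);

	/* waiter side: peek, then atomically claim the event exactly once */
	printf("posted:  %d\n", test_bit_u(ENGINE_IRQ_BREADCRUMB, &irq_posted));
	printf("claimed: %d\n",
	       test_and_clear_bit_u(ENGINE_IRQ_BREADCRUMB, &irq_posted));
	printf("again:   %d\n",
	       test_and_clear_bit_u(ENGINE_IRQ_BREADCRUMB, &irq_posted));
	return 0;
}

One ordering difference worth keeping in mind: in the kernel, set_bit() is atomic but by itself implies no memory barrier, whereas the smp_store_mb() it replaces did; test_and_clear_bit(), as a value-returning RMW, carries full barrier semantics, matching the cmpxchg it replaces. The C11 stand-ins above default to seq_cst, so they are at least as strongly ordered as the kernel originals.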