提交 e137d3ab 编写于 作者: C Chris Wilson 提交者: Rodrigo Vivi

drm/i915/gt: execlists->active is serialised by the tasklet

The active/pending execlists is no longer protected by the
engine->active.lock, but is serialised by the tasklet instead. Update
the locking around the debug and stats to follow suit.

v2: local_bh_disable() to prevent recursing into the tasklet in case we
trigger a softirq (Tvrtko)

Fixes: df403069 ("drm/i915/execlists: Lift process_csb() out of the irq-off spinlock")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191009160906.16195-1-chris@chris-wilson.co.uk
(cherry picked from commit c36eebd9)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
上级 749085a2
...@@ -136,6 +136,20 @@ execlists_active(const struct intel_engine_execlists *execlists) ...@@ -136,6 +136,20 @@ execlists_active(const struct intel_engine_execlists *execlists)
return READ_ONCE(*execlists->active); return READ_ONCE(*execlists->active);
} }
/*
 * execlists_active_lock_bh - serialise a reader against the execlists tasklet
 * @execlists: the engine's execlists state to stabilise
 *
 * The active/pending execlists ports are serialised by the submission
 * tasklet rather than a spinlock (see the commit message above), so a
 * reader must hold the tasklet lock to get a stable view.  Softirqs are
 * disabled first: taking the tasklet lock while the tasklet could fire as
 * a local softirq on this CPU would spin against ourselves forever.
 * Pair every call with execlists_active_unlock_bh().
 */
static inline void
execlists_active_lock_bh(struct intel_engine_execlists *execlists)
{
local_bh_disable(); /* prevent local softirq and lock recursion */
tasklet_lock(&execlists->tasklet);
}
/*
 * execlists_active_unlock_bh - release the execlists tasklet serialisation
 * @execlists: the engine's execlists state previously locked
 *
 * Counterpart to execlists_active_lock_bh(): drop the tasklet lock first,
 * then re-enable softirqs.  Re-enabling bh last also kicks ksoftirqd so
 * any softirq (including this tasklet) raised while we held the section
 * is processed promptly.
 */
static inline void
execlists_active_unlock_bh(struct intel_engine_execlists *execlists)
{
tasklet_unlock(&execlists->tasklet);
local_bh_enable(); /* restore softirq, and kick ksoftirqd! */
}
struct i915_request * struct i915_request *
execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists); execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);
......
...@@ -1197,9 +1197,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine, ...@@ -1197,9 +1197,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
struct drm_printer *m) struct drm_printer *m)
{ {
struct drm_i915_private *dev_priv = engine->i915; struct drm_i915_private *dev_priv = engine->i915;
const struct intel_engine_execlists * const execlists = struct intel_engine_execlists * const execlists = &engine->execlists;
&engine->execlists;
unsigned long flags;
u64 addr; u64 addr;
if (engine->id == RENDER_CLASS && IS_GEN_RANGE(dev_priv, 4, 7)) if (engine->id == RENDER_CLASS && IS_GEN_RANGE(dev_priv, 4, 7))
...@@ -1281,7 +1279,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine, ...@@ -1281,7 +1279,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
idx, hws[idx * 2], hws[idx * 2 + 1]); idx, hws[idx * 2], hws[idx * 2 + 1]);
} }
spin_lock_irqsave(&engine->active.lock, flags); execlists_active_lock_bh(execlists);
for (port = execlists->active; (rq = *port); port++) { for (port = execlists->active; (rq = *port); port++) {
char hdr[80]; char hdr[80];
int len; int len;
...@@ -1309,7 +1307,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine, ...@@ -1309,7 +1307,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
hwsp_seqno(rq)); hwsp_seqno(rq));
print_request(m, rq, hdr); print_request(m, rq, hdr);
} }
spin_unlock_irqrestore(&engine->active.lock, flags); execlists_active_unlock_bh(execlists);
} else if (INTEL_GEN(dev_priv) > 6) { } else if (INTEL_GEN(dev_priv) > 6) {
drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n", drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
ENGINE_READ(engine, RING_PP_DIR_BASE)); ENGINE_READ(engine, RING_PP_DIR_BASE));
...@@ -1440,8 +1438,8 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine) ...@@ -1440,8 +1438,8 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
if (!intel_engine_supports_stats(engine)) if (!intel_engine_supports_stats(engine))
return -ENODEV; return -ENODEV;
spin_lock_irqsave(&engine->active.lock, flags); execlists_active_lock_bh(execlists);
write_seqlock(&engine->stats.lock); write_seqlock_irqsave(&engine->stats.lock, flags);
if (unlikely(engine->stats.enabled == ~0)) { if (unlikely(engine->stats.enabled == ~0)) {
err = -EBUSY; err = -EBUSY;
...@@ -1469,8 +1467,8 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine) ...@@ -1469,8 +1467,8 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
} }
unlock: unlock:
write_sequnlock(&engine->stats.lock); write_sequnlock_irqrestore(&engine->stats.lock, flags);
spin_unlock_irqrestore(&engine->active.lock, flags); execlists_active_unlock_bh(execlists);
return err; return err;
} }
......
...@@ -77,6 +77,12 @@ struct drm_i915_private; ...@@ -77,6 +77,12 @@ struct drm_i915_private;
#define I915_GEM_IDLE_TIMEOUT (HZ / 5) #define I915_GEM_IDLE_TIMEOUT (HZ / 5)
/*
 * tasklet_lock - spin until the tasklet's run/sched lock is acquired
 * @t: tasklet to lock against
 *
 * Busy-waits on tasklet_trylock(), using cpu_relax() between attempts to
 * ease pressure on the cacheline (and be hypervisor/SMT friendly).
 * NOTE(review): unbounded spin — caller must guarantee the tasklet cannot
 * be blocked on this CPU (see execlists_active_lock_bh(), which disables
 * softirqs before calling this to avoid self-deadlock).
 */
static inline void tasklet_lock(struct tasklet_struct *t)
{
while (!tasklet_trylock(t))
cpu_relax();
}
static inline void __tasklet_disable_sync_once(struct tasklet_struct *t) static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
{ {
if (!atomic_fetch_inc(&t->count)) if (!atomic_fetch_inc(&t->count))
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册