Commit 221ab971, authored by Chris Wilson

drm/i915/execlists: Unwind incomplete requests on resets

Given the mechanism to unwind and replay requests (designed to support
preemption), we have an alternative to the current method of
resubmitting the ELSP upon reset. Resubmitting ELSP turns out to be more
complicated than expected, due to having to handle lost context-switch
interrupts and so guessing what ELSP we need to resubmit later. Instead,
by unwinding the requests and clearing the ELSP tracking entirely, we
can then just dequeue the first pair of ready requests after resetting,
using the normal submission procedure.

Currently, the unwound requests have maximum priority and so are
guaranteed to be resubmitted upon resume. If we are lucky, we may be
able to coalesce a new request on top!
Suggested-by: Michał Winiarski <michal.winiarski@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20170916204414.32762-4-chris@chris-wilson.co.uk
Reviewed-by: Michał Winiarski <michal.winiarski@intel.com>
Parent 27606fd8
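As an illustration of the mechanism the diff below implements, here is a minimal, self-contained C sketch of unwinding incomplete requests after a reset. Every name in it (struct request, struct engine, unsubmit(), unwind_incomplete_requests()) is a simplified stand-in invented for this sketch, not the i915 API: the real code walks engine->timeline->requests in reverse with list_for_each_entry_safe_reverse(), unsubmits via __i915_gem_request_unsubmit(), and requeues onto the priority tree.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct request {
	int seqno;
	bool completed;
	struct request *prev, *next;	/* submitted timeline, oldest first */
};

struct engine {
	struct request *oldest, *newest;	/* ends of the submitted timeline */
	struct request *queue;			/* requests awaiting (re)submission */
};

/* Detach rq from the submitted timeline (stand-in for unsubmitting). */
static void unsubmit(struct engine *e, struct request *rq)
{
	if (rq->prev)
		rq->prev->next = rq->next;
	else
		e->oldest = rq->next;
	if (rq->next)
		rq->next->prev = rq->prev;
	else
		e->newest = rq->prev;
	rq->prev = rq->next = NULL;
}

/*
 * Walk the timeline backwards, newest first. The first completed
 * request ends the walk: everything submitted before it has completed
 * too. Each incomplete request is pushed onto the front of the queue,
 * which, because we walk in reverse, restores the original submission
 * order for replay.
 */
static void unwind_incomplete_requests(struct engine *e)
{
	struct request *rq = e->newest;

	while (rq) {
		struct request *older = rq->prev;

		if (rq->completed)
			break;

		unsubmit(e, rq);
		rq->next = e->queue;	/* queue is singly linked via next */
		e->queue = rq;

		rq = older;
	}
}

int main(void)
{
	struct request a = { 1, true,  NULL, NULL };	/* completed */
	struct request b = { 2, false, NULL, NULL };	/* lost to the reset */
	struct request c = { 3, false, NULL, NULL };	/* lost to the reset */
	struct engine e = { &a, &c, NULL };

	a.next = &b; b.prev = &a;
	b.next = &c; c.prev = &b;

	unwind_incomplete_requests(&e);

	for (struct request *rq = e.queue; rq; rq = rq->next)
		printf("replay seqno %d\n", rq->seqno);	/* prints 2, then 3 */
	return 0;
}

After the unwind, the ELSP tracking is cleared and the normal submission path simply dequeues seqno 2 and 3 again, which is what lets the reset path drop the resubmit-ELSP guesswork described above.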
@@ -1308,9 +1308,6 @@ static u8 gtiir[] = {
 static int gen8_init_common_ring(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	struct execlist_port *port = engine->execlist_port;
-	unsigned int n;
-	bool submit;
 	int ret;
 
 	ret = intel_mocs_init_engine(engine);
@@ -1346,26 +1343,8 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
 	engine->csb_head = -1;
 
 	/* After a GPU reset, we may have requests to replay */
-	submit = false;
-	for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) {
-		if (!port_isset(&port[n]))
-			break;
-
-		DRM_DEBUG_DRIVER("Restarting %s:%d from 0x%x\n",
-				 engine->name, n,
-				 port_request(&port[n])->global_seqno);
-
-		/* Discard the current inflight count */
-		port_set(&port[n], port_request(&port[n]));
-		submit = true;
-	}
-
-	if (!i915.enable_guc_submission) {
-		if (submit)
-			execlists_submit_ports(engine);
-		else if (engine->execlist_first)
-			tasklet_schedule(&engine->irq_tasklet);
-	}
+	if (!i915.enable_guc_submission && engine->execlist_first)
+		tasklet_schedule(&engine->irq_tasklet);
 
 	return 0;
 }
@@ -1407,9 +1386,13 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 			      struct drm_i915_gem_request *request)
 {
 	struct execlist_port *port = engine->execlist_port;
+	struct drm_i915_gem_request *rq, *rn;
 	struct intel_context *ce;
+	unsigned long flags;
 	unsigned int n;
 
+	spin_lock_irqsave(&engine->timeline->lock, flags);
+
 	/*
 	 * Catch up with any missed context-switch interrupts.
 	 *
@@ -1419,20 +1402,28 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 	 * guessing the missed context-switch events by looking at what
 	 * requests were completed.
 	 */
-	if (!request) {
-		for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++)
-			i915_gem_request_put(port_request(&port[n]));
-		memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
-		return;
-	}
+	for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++)
+		i915_gem_request_put(port_request(&port[n]));
+	memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
 
-	if (request->ctx != port_request(port)->ctx) {
-		i915_gem_request_put(port_request(port));
-		port[0] = port[1];
-		memset(&port[1], 0, sizeof(port[1]));
-	}
+	/* Push back any incomplete requests for replay after the reset. */
+	list_for_each_entry_safe_reverse(rq, rn,
+					 &engine->timeline->requests, link) {
+		struct i915_priolist *p;
+
+		if (i915_gem_request_completed(rq))
+			break;
+
+		__i915_gem_request_unsubmit(rq);
+
+		p = lookup_priolist(engine,
+				    &rq->priotree,
+				    rq->priotree.priority);
+		list_add(&rq->priotree.link,
+			 &ptr_mask_bits(p, 1)->requests);
+	}
 
-	GEM_BUG_ON(request->ctx != port_request(port)->ctx);
+	spin_unlock_irqrestore(&engine->timeline->lock, flags);
 
 	/* If the request was innocent, we leave the request in the ELSP
 	 * and will try to replay it on restarting. The context image may
@@ -1444,7 +1435,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 	 * and have to at least restore the RING register in the context
 	 * image back to the expected values to skip over the guilty request.
 	 */
-	if (request->fence.error != -EIO)
+	if (!request || request->fence.error != -EIO)
 		return;
 
 	/* We want a simple context + ring to execute the breadcrumb update.
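The "push back for replay" loop above requeues each unwound request via lookup_priolist(). As a rough illustration of that one step, here is a stand-in lookup that keeps buckets sorted by descending priority in a plain linked list; it is not the i915 implementation, whose lookup_priolist() walks an rbtree and packs a hint into the low pointer bit (hence the ptr_mask_bits() in the diff).

#include <stdlib.h>

struct priolist {
	int priority;
	struct priolist *next;	/* buckets sorted by descending priority */
	/* the i915 version also carries the list of queued requests; elided */
};

/*
 * Find the bucket for `priority`, creating and splicing in a new one
 * if it does not exist yet, so the list stays sorted.
 */
static struct priolist *lookup_priolist(struct priolist **head, int priority)
{
	struct priolist **pp = head, *p;

	/* Walk past buckets of strictly higher priority. */
	while ((p = *pp) && p->priority > priority)
		pp = &p->next;

	if (p && p->priority == priority)
		return p;	/* existing bucket */

	p = calloc(1, sizeof(*p));
	if (!p)
		return NULL;	/* allocation failure */
	p->priority = priority;
	p->next = *pp;
	*pp = p;
	return p;
}

Since the commit message notes that unwound requests currently have maximum priority, they land in the front bucket and are the first thing dequeued when submission restarts.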