Commit cf4591d1 authored by Mika Kuoppala, committed by Mika Kuoppala

drm/i915: Wrap port cancellation into a function

On the reset and wedged paths, we want to release the requests
that are tied to the ports and then mark the ports as unset.
Introduce a function for this.
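
As a rough standalone sketch of the same release-then-clear pattern,
outside the driver: the fake_* types and fake_request_put() below are
hypothetical stand-ins, not the real i915 structures or helpers.

    #include <stdio.h>
    #include <string.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    /* Hypothetical stand-ins for the driver's request/port types. */
    struct fake_request { int refcount; };
    struct fake_port { struct fake_request *request; };
    struct fake_execlists { struct fake_port port[2]; };

    /* Stand-in for i915_gem_request_put(): drop one reference, NULL-safe. */
    static void fake_request_put(struct fake_request *rq)
    {
            if (rq)
                    rq->refcount--;
    }

    /*
     * The factored-out pattern: release every request still tracked by
     * the submission ports, then clear the port array so the tracking
     * state is demonstrably unset.
     */
    static void cancel_port_requests(struct fake_execlists *execlists)
    {
            unsigned int i;

            for (i = 0; i < ARRAY_SIZE(execlists->port); i++)
                    fake_request_put(execlists->port[i].request);

            memset(execlists->port, 0, sizeof(execlists->port));
    }

    int main(void)
    {
            struct fake_request rq = { .refcount = 1 };
            struct fake_execlists el = { .port = { { &rq }, { NULL } } };

            cancel_port_requests(&el);
            printf("refcount=%d port[0]=%p\n",
                   rq.refcount, (void *)el.port[0].request);
            return 0;
    }

Factoring the loop into one helper keeps the reset path and the
cancel/wedged path identical instead of duplicating the loop in both.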

v2: rebase
v3: drop local, keep GEM_BUG_ON (Michał, Chris)
v4: rebase

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com>
Reviewed-by: Michał Winiarski <michal.winiarski@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20170922124307.10914-3-mika.kuoppala@intel.com
Parent 19df9a57
@@ -568,21 +568,27 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	execlists_submit_ports(engine);
 }
 
+static void execlist_cancel_port_requests(struct intel_engine_execlists *execlists)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(execlists->port); i++)
+		i915_gem_request_put(port_request(&execlists->port[i]));
+
+	memset(execlists->port, 0, sizeof(execlists->port));
+}
+
 static void execlists_cancel_requests(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
-	struct execlist_port *port = execlists->port;
 	struct drm_i915_gem_request *rq, *rn;
 	struct rb_node *rb;
 	unsigned long flags;
-	unsigned long n;
 
 	spin_lock_irqsave(&engine->timeline->lock, flags);
 
 	/* Cancel the requests on the HW and clear the ELSP tracker. */
-	for (n = 0; n < ARRAY_SIZE(execlists->port); n++)
-		i915_gem_request_put(port_request(&port[n]));
-	memset(execlists->port, 0, sizeof(execlists->port));
+	execlist_cancel_port_requests(execlists);
 
 	/* Mark all executing requests as skipped. */
 	list_for_each_entry(rq, &engine->timeline->requests, link) {
@@ -613,9 +619,10 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 	/* Remaining _unready_ requests will be nop'ed when submitted */
 
 	execlists->queue = RB_ROOT;
 	execlists->first = NULL;
-	GEM_BUG_ON(port_isset(&port[0]));
+	GEM_BUG_ON(port_isset(&execlists->port[0]));
 
 	/*
 	 * The port is checked prior to scheduling a tasklet, but
@@ -1372,11 +1379,9 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 			      struct drm_i915_gem_request *request)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
-	struct execlist_port *port = execlists->port;
 	struct drm_i915_gem_request *rq, *rn;
 	struct intel_context *ce;
 	unsigned long flags;
-	unsigned int n;
 
 	spin_lock_irqsave(&engine->timeline->lock, flags);
 
@@ -1389,9 +1394,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 	 * guessing the missed context-switch events by looking at what
	 * requests were completed.
	 */
-	for (n = 0; n < ARRAY_SIZE(execlists->port); n++)
-		i915_gem_request_put(port_request(&port[n]));
-	memset(execlists->port, 0, sizeof(execlists->port));
+	execlist_cancel_port_requests(execlists);
 
 	/* Push back any incomplete requests for replay after the reset. */
 	list_for_each_entry_safe_reverse(rq, rn,