Commit 40e895ce authored by John Harrison, committed by Daniel Vetter

drm/i915: Set context in request from creation even in legacy mode

In execlist mode, the context object pointer is written into the request
structure (and reference counted) at the point of request creation. In legacy
mode, this only happens inside i915_add_request().

This patch updates the legacy code path to match the execlist version. This
allows all the intermediate code between request creation and request submission
to get at the context object given only a request structure, thus negating the
need to pass context pointers here, there and everywhere.
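
As an illustration of the kind of simplification this enables (hypothetical helper names, not code from this patch), an intermediate function between creation and submission no longer needs a separate context argument:

/* Hypothetical example only: previously, code running between request
 * creation and submission had to be handed the context explicitly. */
static int emit_something(struct drm_i915_gem_request *req,
			  struct intel_context *ctx);

/* With the context referenced and stored at creation time, the same code
 * can read it from the request instead, so the extra parameter goes away. */
static int emit_something(struct drm_i915_gem_request *req)
{
	struct intel_context *ctx = req->ctx;	/* valid from i915_gem_request_alloc() onwards */

	/* ... emit commands on behalf of ctx ... */
	return 0;
}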

v2: Moved the context reference so it does not need to be undone if the
get_seqno() fails.

v3: Fixed execlist mode always hitting a warning about invalid last_contexts
(which don't exist in execlist mode).

v4: Updated for new i915_gem_request_alloc() scheme.

For: VIZ-5115
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Tomas Elf <tomas.elf@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Parent 0c8dac88
@@ -2536,14 +2536,7 @@ void __i915_add_request(struct intel_engine_cs *ring,
 	 */
 	request->batch_obj = obj;
 
-	if (!i915.enable_execlists) {
-		/* Hold a reference to the current context so that we can inspect
-		 * it later in case a hangcheck error event fires.
-		 */
-		request->ctx = ring->last_context;
-		if (request->ctx)
-			i915_gem_context_reference(request->ctx);
-	}
+	WARN_ON(!i915.enable_execlists && (request->ctx != ring->last_context));
 
 	request->emitted_jiffies = jiffies;
 	list_add_tail(&request->list, &ring->request_list);
@@ -2654,21 +2647,24 @@ int i915_gem_request_alloc(struct intel_engine_cs *ring,
 	if (req == NULL)
 		return -ENOMEM;
 
-	kref_init(&req->ref);
-	req->i915 = dev_priv;
-
 	ret = i915_gem_get_seqno(ring->dev, &req->seqno);
 	if (ret)
 		goto err;
 
+	kref_init(&req->ref);
+	req->i915 = dev_priv;
 	req->ring = ring;
+	req->ctx  = ctx;
+	i915_gem_context_reference(req->ctx);
 
 	if (i915.enable_execlists)
-		ret = intel_logical_ring_alloc_request_extras(req, ctx);
+		ret = intel_logical_ring_alloc_request_extras(req);
 	else
 		ret = intel_ring_alloc_request_extras(req);
-	if (ret)
+	if (ret) {
+		i915_gem_context_unreference(req->ctx);
 		goto err;
+	}
 
 	/*
 	 * Reserve space in the ring buffer for all the commands required to
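
The context reference taken in i915_gem_request_alloc() is balanced either on the error path above or when the request is finally released. For orientation, the release side at this point in the series looks roughly like the following (paraphrased from memory, not part of this diff; details may differ):

void i915_gem_request_free(struct kref *req_ref)
{
	struct drm_i915_gem_request *req = container_of(req_ref,
						 typeof(*req), ref);
	struct intel_context *ctx = req->ctx;

	if (ctx) {
		if (i915.enable_execlists) {
			struct intel_engine_cs *ring = req->ring;

			/* The default context is never pinned per-request. */
			if (ctx != ring->default_context)
				intel_lr_context_unpin(ring, ctx);
		}

		i915_gem_context_unreference(ctx);
	}

	kmem_cache_free(req->i915->requests, req);
}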
@@ -659,20 +659,17 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
 	return logical_ring_invalidate_all_caches(ringbuf, ctx);
 }
 
-int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request,
-					    struct intel_context *ctx)
+int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 {
 	int ret;
 
-	if (ctx != request->ring->default_context) {
-		ret = intel_lr_context_pin(request->ring, ctx);
+	if (request->ctx != request->ring->default_context) {
+		ret = intel_lr_context_pin(request->ring, request->ctx);
 		if (ret)
 			return ret;
 	}
 
-	request->ringbuf = ctx->engine[request->ring->id].ringbuf;
-	request->ctx = ctx;
-	i915_gem_context_reference(request->ctx);
+	request->ringbuf = request->ctx->engine[request->ring->id].ringbuf;
 
 	return 0;
 }
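
For comparison, the legacy-mode counterpart selected by the else branch in i915_gem_request_alloc() is not part of this diff; at this point in the series it amounts to roughly the following (a sketch, not taken from the patch):

int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
{
	/* Legacy mode uses one ring buffer per engine rather than one per
	 * context, so there is nothing to pin; just record the buffer. */
	request->ringbuf = request->ring->buffer;

	return 0;
}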
@@ -36,8 +36,7 @@
 #define RING_CONTEXT_STATUS_PTR(ring)	((ring)->mmio_base+0x3a0)
 
 /* Logical Rings */
-int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request,
-					    struct intel_context *ctx);
+int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
 void intel_logical_ring_stop(struct intel_engine_cs *ring);
 void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
 int intel_logical_rings_init(struct drm_device *dev);