提交 7069b144 编写于 作者: Chris Wilson

drm/i915: Replace the pinned context address with its unique ID

Rather than reuse the current location of the context in the global GTT
for its hardware identifier, use the context's unique ID assigned to it
for its whole lifetime.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1461833819-3991-18-git-send-email-chris@chris-wilson.co.uk
上级 5d1808ec
...@@ -2043,15 +2043,13 @@ static void i915_dump_lrc_obj(struct seq_file *m, ...@@ -2043,15 +2043,13 @@ static void i915_dump_lrc_obj(struct seq_file *m,
struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state; struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
unsigned long ggtt_offset = 0; unsigned long ggtt_offset = 0;
seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);
if (ctx_obj == NULL) { if (ctx_obj == NULL) {
seq_printf(m, "Context on %s with no gem object\n", seq_puts(m, "\tNot allocated\n");
engine->name);
return; return;
} }
seq_printf(m, "CONTEXT: %s %u\n", engine->name,
intel_execlists_ctx_id(ctx, engine));
if (!i915_gem_obj_ggtt_bound(ctx_obj)) if (!i915_gem_obj_ggtt_bound(ctx_obj))
seq_puts(m, "\tNot bound in GGTT\n"); seq_puts(m, "\tNot bound in GGTT\n");
else else
...@@ -2170,8 +2168,8 @@ static int i915_execlists(struct seq_file *m, void *data) ...@@ -2170,8 +2168,8 @@ static int i915_execlists(struct seq_file *m, void *data)
seq_printf(m, "\t%d requests in queue\n", count); seq_printf(m, "\t%d requests in queue\n", count);
if (head_req) { if (head_req) {
seq_printf(m, "\tHead request id: %u\n", seq_printf(m, "\tHead request context: %u\n",
intel_execlists_ctx_id(head_req->ctx, engine)); head_req->ctx->hw_id);
seq_printf(m, "\tHead request tail: %u\n", seq_printf(m, "\tHead request tail: %u\n",
head_req->tail); head_req->tail);
} }
......
...@@ -224,6 +224,7 @@ enum { ...@@ -224,6 +224,7 @@ enum {
FAULT_AND_CONTINUE /* Unsupported */ FAULT_AND_CONTINUE /* Unsupported */
}; };
#define GEN8_CTX_ID_SHIFT 32 #define GEN8_CTX_ID_SHIFT 32
#define GEN8_CTX_ID_WIDTH 21
#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17 #define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26 #define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26
...@@ -307,7 +308,7 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine) ...@@ -307,7 +308,7 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
* This is what a descriptor looks like, from LSB to MSB: * This is what a descriptor looks like, from LSB to MSB:
* bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template) * bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template)
* bits 12-31: LRCA, GTT address of (the HWSP of) this context * bits 12-31: LRCA, GTT address of (the HWSP of) this context
* bits 32-52: ctx ID, a globally unique tag (the LRCA again!) * bits 32-52: ctx ID, a globally unique tag
* bits 53-54: mbz, reserved for use by hardware * bits 53-54: mbz, reserved for use by hardware
* bits 55-63: group ID, currently unused and set to 0 * bits 55-63: group ID, currently unused and set to 0
*/ */
...@@ -315,14 +316,14 @@ static void ...@@ -315,14 +316,14 @@ static void
intel_lr_context_descriptor_update(struct intel_context *ctx, intel_lr_context_descriptor_update(struct intel_context *ctx,
struct intel_engine_cs *engine) struct intel_engine_cs *engine)
{ {
uint64_t lrca, desc; u64 desc;
lrca = ctx->engine[engine->id].lrc_vma->node.start + BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH));
LRC_PPHWSP_PN * PAGE_SIZE;
desc = engine->ctx_desc_template; /* bits 0-11 */ desc = engine->ctx_desc_template; /* bits 0-11 */
desc |= lrca; /* bits 12-31 */ desc |= ctx->engine[engine->id].lrc_vma->node.start + /* bits 12-31 */
desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-52 */ LRC_PPHWSP_PN * PAGE_SIZE;
desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */
ctx->engine[engine->id].lrc_desc = desc; ctx->engine[engine->id].lrc_desc = desc;
} }
...@@ -333,28 +334,6 @@ uint64_t intel_lr_context_descriptor(struct intel_context *ctx, ...@@ -333,28 +334,6 @@ uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
return ctx->engine[engine->id].lrc_desc; return ctx->engine[engine->id].lrc_desc;
} }
/**
* intel_execlists_ctx_id() - get the Execlists Context ID
* @ctx: Context to get the ID for
* @ring: Engine to get the ID for
*
* Do not confuse with ctx->id! Unfortunately we have a name overload
* here: the old context ID we pass to userspace as a handler so that
* they can refer to a context, and the new context ID we pass to the
* ELSP so that the GPU can inform us of the context status via
* interrupts.
*
* The context ID is a portion of the context descriptor, so we can
* just extract the required part from the cached descriptor.
*
* Return: 20-bits globally unique context ID.
*/
u32 intel_execlists_ctx_id(struct intel_context *ctx,
struct intel_engine_cs *engine)
{
return intel_lr_context_descriptor(ctx, engine) >> GEN8_CTX_ID_SHIFT;
}
static void execlists_elsp_write(struct drm_i915_gem_request *rq0, static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
struct drm_i915_gem_request *rq1) struct drm_i915_gem_request *rq1)
{ {
...@@ -500,7 +479,7 @@ execlists_check_remove_request(struct intel_engine_cs *engine, u32 request_id) ...@@ -500,7 +479,7 @@ execlists_check_remove_request(struct intel_engine_cs *engine, u32 request_id)
if (!head_req) if (!head_req)
return 0; return 0;
if (unlikely(intel_execlists_ctx_id(head_req->ctx, engine) != request_id)) if (unlikely(head_req->ctx->hw_id != request_id))
return 0; return 0;
WARN(head_req->elsp_submitted == 0, "Never submitted head request\n"); WARN(head_req->elsp_submitted == 0, "Never submitted head request\n");
......
...@@ -113,9 +113,6 @@ void intel_lr_context_reset(struct drm_i915_private *dev_priv, ...@@ -113,9 +113,6 @@ void intel_lr_context_reset(struct drm_i915_private *dev_priv,
uint64_t intel_lr_context_descriptor(struct intel_context *ctx, uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
struct intel_engine_cs *engine); struct intel_engine_cs *engine);
u32 intel_execlists_ctx_id(struct intel_context *ctx,
struct intel_engine_cs *engine);
/* Execlists */ /* Execlists */
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists); int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
struct i915_execbuffer_params; struct i915_execbuffer_params;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册