Commit 9021ad03 authored by Chris Wilson

drm/i915: Name the inner most per-engine intel_context struct

We want to give a name to the currently anonymous per-engine struct
inside the context, so that we can assign it to a local variable and
save clumsy typing. The name we have chosen is intel_context, as it
reflects the HW-facing portion of the context state (the logical context
state, the registers, the ringbuffer, etc.).
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Dave Gordon <david.s.gordon@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1464098023-3294-4-git-send-email-chris@chris-wilson.co.uk
Parent ca585b5d
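
To make the renaming concrete before the diff, here is a minimal compilable sketch of the before/after usage pattern (the stand-in types and the example function are illustrative, not the real i915 definitions; only the struct and field names are taken from the patch):

/* Illustrative stand-ins for the real i915 structures. */
struct intel_context {                  /* the newly named struct */
        void *state;                    /* logical context state object */
        int pin_count;
};

struct i915_gem_context {
        struct intel_context engine[4]; /* one slot per engine */
};

static int example_context_pin(struct i915_gem_context *ctx, int engine_id)
{
        /* Before the patch, every access spelt out the full lookup:
         *      ctx->engine[engine_id].pin_count++;
         * With the struct named, a local alias does the lookup once:
         */
        struct intel_context *ce = &ctx->engine[engine_id];

        if (ce->pin_count++)
                return 0;

        /* ... pin ce->state here ... */
        return 0;
}

The same "ce" alias pattern repeats throughout the hunks below.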
@@ -869,7 +869,7 @@ struct i915_gem_context {
 	} legacy_hw_ctx;

 	/* Execlists */
-	struct {
+	struct intel_context {
 		struct drm_i915_gem_object *state;
 		struct intel_ringbuffer *ringbuf;
 		int pin_count;
@@ -363,7 +363,6 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
 	struct i915_gem_context *ctx = client->owner;
 	struct guc_context_desc desc;
 	struct sg_table *sg;
-	enum intel_engine_id id;
 	u32 gfx_addr;

 	memset(&desc, 0, sizeof(desc));
@@ -373,10 +372,10 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
 	desc.priority = client->priority;
 	desc.db_id = client->doorbell_id;

-	for_each_engine_id(engine, dev_priv, id) {
+	for_each_engine(engine, dev_priv) {
+		struct intel_context *ce = &ctx->engine[engine->id];
 		struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id];
 		struct drm_i915_gem_object *obj;
-		uint64_t ctx_desc;

 		/* TODO: We have a design issue to be solved here. Only when we
 		 * receive the first batch, we know which engine is used by the
@@ -385,20 +384,18 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
 		 * for now who owns a GuC client. But for future owner of GuC
 		 * client, need to make sure lrc is pinned prior to enter here.
 		 */
-		obj = ctx->engine[id].state;
-		if (!obj)
+		if (!ce->state)
 			break;	/* XXX: continue? */

-		ctx_desc = intel_lr_context_descriptor(ctx, engine);
-		lrc->context_desc = (u32)ctx_desc;
+		lrc->context_desc = lower_32_bits(ce->lrc_desc);

 		/* The state page is after PPHWSP */
-		gfx_addr = i915_gem_obj_ggtt_offset(obj);
+		gfx_addr = i915_gem_obj_ggtt_offset(ce->state);
 		lrc->ring_lcra = gfx_addr + LRC_STATE_PN * PAGE_SIZE;
 		lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
 				(engine->guc_id << GUC_ELC_ENGINE_OFFSET);

-		obj = ctx->engine[id].ringbuf->obj;
+		obj = ce->ringbuf->obj;
 		gfx_addr = i915_gem_obj_ggtt_offset(obj);

 		lrc->ring_begin = gfx_addr;
@@ -300,7 +300,7 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
  * descriptor for a pinned context
  *
  * @ctx: Context to work on
- * @ring: Engine the descriptor will be used with
+ * @engine: Engine the descriptor will be used with
  *
  * The context descriptor encodes various attributes of a context,
  * including its GTT address and some flags. Because it's fairly
@@ -318,16 +318,17 @@ static void
 intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
 				   struct intel_engine_cs *engine)
 {
+	struct intel_context *ce = &ctx->engine[engine->id];
 	u64 desc;

 	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH));

 	desc = engine->ctx_desc_template;			/* bits  0-11 */
-	desc |= ctx->engine[engine->id].lrc_vma->node.start +	/* bits 12-31 */
-	       LRC_PPHWSP_PN * PAGE_SIZE;
+	desc |= ce->lrc_vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
+								/* bits 12-31 */
 	desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT;		/* bits 32-52 */

-	ctx->engine[engine->id].lrc_desc = desc;
+	ce->lrc_desc = desc;
 }

 uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
@@ -674,6 +675,7 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
+	struct intel_context *ce = &request->ctx->engine[engine->id];
 	int ret;

 	/* Flush enough space to reduce the likelihood of waiting after
@@ -682,13 +684,13 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
 	 */
 	request->reserved_space += EXECLISTS_REQUEST_SIZE;

-	if (request->ctx->engine[engine->id].state == NULL) {
+	if (!ce->state) {
 		ret = execlists_context_deferred_alloc(request->ctx, engine);
 		if (ret)
 			return ret;
 	}

-	request->ringbuf = request->ctx->engine[engine->id].ringbuf;
+	request->ringbuf = ce->ringbuf;

 	if (i915.enable_guc_submission) {
 		/*
@@ -709,12 +711,12 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
 	if (ret)
 		goto err_unpin;

-	if (!request->ctx->engine[engine->id].initialised) {
+	if (!ce->initialised) {
 		ret = engine->init_context(request);
 		if (ret)
 			goto err_unpin;

-		request->ctx->engine[engine->id].initialised = true;
+		ce->initialised = true;
 	}

 	/* Note that after this point, we have committed to using
@@ -933,24 +935,22 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
 				struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = ctx->i915;
-	struct drm_i915_gem_object *ctx_obj;
-	struct intel_ringbuffer *ringbuf;
+	struct intel_context *ce = &ctx->engine[engine->id];
 	void *vaddr;
 	u32 *lrc_reg_state;
 	int ret;

 	lockdep_assert_held(&ctx->i915->dev->struct_mutex);

-	if (ctx->engine[engine->id].pin_count++)
+	if (ce->pin_count++)
 		return 0;

-	ctx_obj = ctx->engine[engine->id].state;
-	ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
-			PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+	ret = i915_gem_obj_ggtt_pin(ce->state, GEN8_LR_CONTEXT_ALIGN,
+				    PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
 	if (ret)
 		goto err;

-	vaddr = i915_gem_object_pin_map(ctx_obj);
+	vaddr = i915_gem_object_pin_map(ce->state);
 	if (IS_ERR(vaddr)) {
 		ret = PTR_ERR(vaddr);
 		goto unpin_ctx_obj;
@@ -958,17 +958,17 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
 	lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;

-	ringbuf = ctx->engine[engine->id].ringbuf;
-	ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf);
+	ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ce->ringbuf);
 	if (ret)
 		goto unpin_map;

 	i915_gem_context_reference(ctx);
-	ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
+	ce->lrc_vma = i915_gem_obj_to_ggtt(ce->state);
 	intel_lr_context_descriptor_update(ctx, engine);
-	lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start;
-	ctx->engine[engine->id].lrc_reg_state = lrc_reg_state;
-	ctx_obj->dirty = true;
+	lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ringbuf->vma->node.start;
+	ce->lrc_reg_state = lrc_reg_state;
+	ce->state->dirty = true;

 	/* Invalidate GuC TLB. */
 	if (i915.enable_guc_submission)
@@ -977,34 +977,33 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
 	return 0;

 unpin_map:
-	i915_gem_object_unpin_map(ctx_obj);
+	i915_gem_object_unpin_map(ce->state);
 unpin_ctx_obj:
-	i915_gem_object_ggtt_unpin(ctx_obj);
+	i915_gem_object_ggtt_unpin(ce->state);
 err:
-	ctx->engine[engine->id].pin_count = 0;
+	ce->pin_count = 0;
 	return ret;
 }

 void intel_lr_context_unpin(struct i915_gem_context *ctx,
 			    struct intel_engine_cs *engine)
 {
-	struct drm_i915_gem_object *ctx_obj;
+	struct intel_context *ce = &ctx->engine[engine->id];

 	lockdep_assert_held(&ctx->i915->dev->struct_mutex);
-	GEM_BUG_ON(ctx->engine[engine->id].pin_count == 0);
+	GEM_BUG_ON(ce->pin_count == 0);

-	if (--ctx->engine[engine->id].pin_count)
+	if (--ce->pin_count)
 		return;

-	intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
+	intel_unpin_ringbuffer_obj(ce->ringbuf);

-	ctx_obj = ctx->engine[engine->id].state;
-	i915_gem_object_unpin_map(ctx_obj);
-	i915_gem_object_ggtt_unpin(ctx_obj);
+	i915_gem_object_unpin_map(ce->state);
+	i915_gem_object_ggtt_unpin(ce->state);

-	ctx->engine[engine->id].lrc_vma = NULL;
-	ctx->engine[engine->id].lrc_desc = 0;
-	ctx->engine[engine->id].lrc_reg_state = NULL;
+	ce->lrc_vma = NULL;
+	ce->lrc_desc = 0;
+	ce->lrc_reg_state = NULL;

 	i915_gem_context_unreference(ctx);
 }
@@ -2490,12 +2489,13 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 					    struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_object *ctx_obj;
+	struct intel_context *ce = &ctx->engine[engine->id];
 	uint32_t context_size;
 	struct intel_ringbuffer *ringbuf;
 	int ret;

 	WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
-	WARN_ON(ctx->engine[engine->id].state);
+	WARN_ON(ce->state);

 	context_size = round_up(intel_lr_context_size(engine), 4096);
@@ -2520,9 +2520,9 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 		goto error_ringbuf;
 	}

-	ctx->engine[engine->id].ringbuf = ringbuf;
-	ctx->engine[engine->id].state = ctx_obj;
-	ctx->engine[engine->id].initialised = engine->init_context == NULL;
+	ce->ringbuf = ringbuf;
+	ce->state = ctx_obj;
+	ce->initialised = engine->init_context == NULL;

 	return 0;
@@ -2530,8 +2530,8 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 	intel_ringbuffer_free(ringbuf);
 error_deref_obj:
 	drm_gem_object_unreference(&ctx_obj->base);
-	ctx->engine[engine->id].ringbuf = NULL;
-	ctx->engine[engine->id].state = NULL;
+	ce->ringbuf = NULL;
+	ce->state = NULL;
 	return ret;
 }
@@ -2541,10 +2541,8 @@ void intel_lr_context_reset(struct drm_i915_private *dev_priv,
 	struct intel_engine_cs *engine;

 	for_each_engine(engine, dev_priv) {
-		struct drm_i915_gem_object *ctx_obj =
-				ctx->engine[engine->id].state;
-		struct intel_ringbuffer *ringbuf =
-				ctx->engine[engine->id].ringbuf;
+		struct intel_context *ce = &ctx->engine[engine->id];
+		struct drm_i915_gem_object *ctx_obj = ce->state;
 		void *vaddr;
 		uint32_t *reg_state;
@@ -2563,7 +2561,7 @@ void intel_lr_context_reset(struct drm_i915_private *dev_priv,
 		i915_gem_object_unpin_map(ctx_obj);

-		ringbuf->head = 0;
-		ringbuf->tail = 0;
+		ce->ringbuf->head = 0;
+		ce->ringbuf->tail = 0;
 	}
 }
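
As an aside, the intel_lr_context_descriptor_update() hunk above packs three fields into a single u64. A compilable sketch of that packing, with plain integers standing in for the real i915 structures (the bit ranges and the value of GEN8_CTX_ID_SHIFT follow from the comments in the diff; the helper itself is hypothetical):

#include <stdint.h>

#define GEN8_CTX_ID_SHIFT 32	/* hw_id occupies bits 32-52, per the diff */

/* Hypothetical helper mirroring intel_lr_context_descriptor_update(). */
static uint64_t lrc_descriptor(uint64_t ctx_desc_template,	/* bits  0-11 */
			       uint64_t lrc_ggtt_offset,	/* bits 12-31, page aligned */
			       uint64_t hw_id)			/* bits 32-52 */
{
	uint64_t desc = ctx_desc_template;

	desc |= lrc_ggtt_offset;	/* GGTT address of the LRC, past the PPHWSP page */
	desc |= hw_id << GEN8_CTX_ID_SHIFT;

	return desc;
}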