diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e0c932054cd8da2e6e680e693f789afb7252aded..784978d3375882b8ba161c25c1695fcca7959566 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -869,7 +869,7 @@ struct i915_gem_context {
 	} legacy_hw_ctx;
 
 	/* Execlists */
-	struct {
+	struct intel_context {
 		struct drm_i915_gem_object *state;
 		struct intel_ringbuffer *ringbuf;
 		int pin_count;
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index b42a3adc3fa4abcbd1754a8e8a0b322e41ebc334..7b3c96e6ea378c405f6560f21d0cd29c058d7e43 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -363,7 +363,6 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
 	struct i915_gem_context *ctx = client->owner;
 	struct guc_context_desc desc;
 	struct sg_table *sg;
-	enum intel_engine_id id;
 	u32 gfx_addr;
 
 	memset(&desc, 0, sizeof(desc));
@@ -373,10 +372,10 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
 	desc.priority = client->priority;
 	desc.db_id = client->doorbell_id;
 
-	for_each_engine_id(engine, dev_priv, id) {
+	for_each_engine(engine, dev_priv) {
+		struct intel_context *ce = &ctx->engine[engine->id];
 		struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id];
 		struct drm_i915_gem_object *obj;
-		uint64_t ctx_desc;
 
 		/* TODO: We have a design issue to be solved here. Only when we
 		 * receive the first batch, we know which engine is used by the
@@ -385,20 +384,18 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
 		 * for now who owns a GuC client. But for future owner of GuC
 		 * client, need to make sure lrc is pinned prior to enter here.
 		 */
-		obj = ctx->engine[id].state;
-		if (!obj)
+		if (!ce->state)
 			break;	/* XXX: continue? */
 
-		ctx_desc = intel_lr_context_descriptor(ctx, engine);
-		lrc->context_desc = (u32)ctx_desc;
+		lrc->context_desc = lower_32_bits(ce->lrc_desc);
 
 		/* The state page is after PPHWSP */
-		gfx_addr = i915_gem_obj_ggtt_offset(obj);
+		gfx_addr = i915_gem_obj_ggtt_offset(ce->state);
 		lrc->ring_lcra = gfx_addr + LRC_STATE_PN * PAGE_SIZE;
 		lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
 				(engine->guc_id << GUC_ELC_ENGINE_OFFSET);
 
-		obj = ctx->engine[id].ringbuf->obj;
+		obj = ce->ringbuf->obj;
 		gfx_addr = i915_gem_obj_ggtt_offset(obj);
 
 		lrc->ring_begin = gfx_addr;
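Note for readers (annotation, not patch content): the hunks above are representative of the whole patch. Each repeated ctx->engine[engine->id] lookup is hoisted into a local struct intel_context *ce, and the GuC path now reuses the descriptor cached at pin time (ce->lrc_desc) instead of recomputing it via intel_lr_context_descriptor(). A minimal sketch of the resulting pattern, assuming the context has already been pinned; the helper name fill_lrc_entry() is illustrative only:

static void fill_lrc_entry(struct guc_execlist_context *lrc,
			   struct i915_gem_context *ctx,
			   struct intel_engine_cs *engine)
{
	/* One lookup of the per-engine context state... */
	struct intel_context *ce = &ctx->engine[engine->id];

	/* ...then reuse the descriptor computed when the context was pinned. */
	lrc->context_desc = lower_32_bits(ce->lrc_desc);

	/* The hardware state page sits after the per-process HWSP. */
	lrc->ring_lcra = i915_gem_obj_ggtt_offset(ce->state) +
			 LRC_STATE_PN * PAGE_SIZE;
}

This shortcut is only valid because the context is pinned (and ce->lrc_desc filled in) before guc_init_ctx_desc() runs; the TODO comment in the hunk above records exactly that ordering requirement.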
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 3d95b26b32ef71cb382850b283e2075384429bd5..a1dba678942d319a6480bce051fd9b17de49e9e3 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -300,7 +300,7 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
  *					  descriptor for a pinned context
  *
  * @ctx: Context to work on
- * @ring: Engine the descriptor will be used with
+ * @engine: Engine the descriptor will be used with
  *
  * The context descriptor encodes various attributes of a context,
  * including its GTT address and some flags. Because it's fairly
@@ -318,16 +318,17 @@ static void
 intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
 				   struct intel_engine_cs *engine)
 {
+	struct intel_context *ce = &ctx->engine[engine->id];
 	u64 desc;
 
 	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH));
 
 	desc = engine->ctx_desc_template;			/* bits  0-11 */
-	desc |= ctx->engine[engine->id].lrc_vma->node.start +	/* bits 12-31 */
-			LRC_PPHWSP_PN * PAGE_SIZE;
+	desc |= ce->lrc_vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
+								/* bits 12-31 */
 	desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT;		/* bits 32-52 */
 
-	ctx->engine[engine->id].lrc_desc = desc;
+	ce->lrc_desc = desc;
 }
 
 uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
@@ -674,6 +675,7 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
+	struct intel_context *ce = &request->ctx->engine[engine->id];
 	int ret;
 
 	/* Flush enough space to reduce the likelihood of waiting after
@@ -682,13 +684,13 @@
 	 */
 	request->reserved_space += EXECLISTS_REQUEST_SIZE;
 
-	if (request->ctx->engine[engine->id].state == NULL) {
+	if (!ce->state) {
 		ret = execlists_context_deferred_alloc(request->ctx, engine);
 		if (ret)
 			return ret;
 	}
 
-	request->ringbuf = request->ctx->engine[engine->id].ringbuf;
+	request->ringbuf = ce->ringbuf;
 
 	if (i915.enable_guc_submission) {
 		/*
@@ -709,12 +711,12 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
 	if (ret)
 		goto err_unpin;
 
-	if (!request->ctx->engine[engine->id].initialised) {
+	if (!ce->initialised) {
 		ret = engine->init_context(request);
 		if (ret)
 			goto err_unpin;
 
-		request->ctx->engine[engine->id].initialised = true;
+		ce->initialised = true;
 	}
 
 	/* Note that after this point, we have committed to using
@@ -933,24 +935,22 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
 				struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = ctx->i915;
-	struct drm_i915_gem_object *ctx_obj;
-	struct intel_ringbuffer *ringbuf;
+	struct intel_context *ce = &ctx->engine[engine->id];
 	void *vaddr;
 	u32 *lrc_reg_state;
 	int ret;
 
 	lockdep_assert_held(&ctx->i915->dev->struct_mutex);
 
-	if (ctx->engine[engine->id].pin_count++)
+	if (ce->pin_count++)
 		return 0;
 
-	ctx_obj = ctx->engine[engine->id].state;
-	ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
-				    PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+	ret = i915_gem_obj_ggtt_pin(ce->state, GEN8_LR_CONTEXT_ALIGN,
+				    PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
 	if (ret)
 		goto err;
 
-	vaddr = i915_gem_object_pin_map(ctx_obj);
+	vaddr = i915_gem_object_pin_map(ce->state);
 	if (IS_ERR(vaddr)) {
 		ret = PTR_ERR(vaddr);
 		goto unpin_ctx_obj;
@@ -958,17 +958,17 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
 
 	lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
 
-	ringbuf = ctx->engine[engine->id].ringbuf;
-	ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf);
+	ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ce->ringbuf);
 	if (ret)
 		goto unpin_map;
 
 	i915_gem_context_reference(ctx);
-	ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
+	ce->lrc_vma = i915_gem_obj_to_ggtt(ce->state);
 	intel_lr_context_descriptor_update(ctx, engine);
-	lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start;
-	ctx->engine[engine->id].lrc_reg_state = lrc_reg_state;
-	ctx_obj->dirty = true;
+
+	lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ringbuf->vma->node.start;
+	ce->lrc_reg_state = lrc_reg_state;
+	ce->state->dirty = true;
 
 	/* Invalidate GuC TLB. */
 	if (i915.enable_guc_submission)
@@ -977,34 +977,33 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
 	return 0;
 
 unpin_map:
-	i915_gem_object_unpin_map(ctx_obj);
+	i915_gem_object_unpin_map(ce->state);
 unpin_ctx_obj:
-	i915_gem_object_ggtt_unpin(ctx_obj);
+	i915_gem_object_ggtt_unpin(ce->state);
 err:
-	ctx->engine[engine->id].pin_count = 0;
+	ce->pin_count = 0;
 	return ret;
 }
 
 void intel_lr_context_unpin(struct i915_gem_context *ctx,
 			    struct intel_engine_cs *engine)
 {
-	struct drm_i915_gem_object *ctx_obj;
+	struct intel_context *ce = &ctx->engine[engine->id];
 
 	lockdep_assert_held(&ctx->i915->dev->struct_mutex);
-	GEM_BUG_ON(ctx->engine[engine->id].pin_count == 0);
+	GEM_BUG_ON(ce->pin_count == 0);
 
-	if (--ctx->engine[engine->id].pin_count)
+	if (--ce->pin_count)
 		return;
 
-	intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
+	intel_unpin_ringbuffer_obj(ce->ringbuf);
 
-	ctx_obj = ctx->engine[engine->id].state;
-	i915_gem_object_unpin_map(ctx_obj);
-	i915_gem_object_ggtt_unpin(ctx_obj);
+	i915_gem_object_unpin_map(ce->state);
+	i915_gem_object_ggtt_unpin(ce->state);
 
-	ctx->engine[engine->id].lrc_vma = NULL;
-	ctx->engine[engine->id].lrc_desc = 0;
-	ctx->engine[engine->id].lrc_reg_state = NULL;
+	ce->lrc_vma = NULL;
+	ce->lrc_desc = 0;
+	ce->lrc_reg_state = NULL;
 
 	i915_gem_context_unreference(ctx);
 }
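Aside (annotation, not patch content): the descriptor that intel_lr_context_descriptor_update() caches in ce->lrc_desc, and that guc_init_ctx_desc() truncates with lower_32_bits(), packs its fields exactly as the bit comments in the hunk describe. A standalone sketch of that encoding; make_lrc_desc() is an illustrative name only:

static u64 make_lrc_desc(u64 ctx_desc_template, u64 pphwsp_addr, u32 hw_id)
{
	u64 desc;

	desc  = ctx_desc_template;		   /* bits  0-11: GEN8_CTX_* flags */
	desc |= pphwsp_addr;			   /* bits 12-31: GGTT address of the PPHWSP */
	desc |= (u64)hw_id << GEN8_CTX_ID_SHIFT;   /* bits 32-52: context tag (ctx->hw_id) */

	return desc;
}

Here pphwsp_addr stands for ce->lrc_vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE, i.e. the pinned image's GGTT offset skipping past the per-process HWSP, which is why the cached descriptor is only valid while the context stays pinned.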
@@ -2490,12 +2489,13 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 					    struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_object *ctx_obj;
+	struct intel_context *ce = &ctx->engine[engine->id];
 	uint32_t context_size;
 	struct intel_ringbuffer *ringbuf;
 	int ret;
 
 	WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
-	WARN_ON(ctx->engine[engine->id].state);
+	WARN_ON(ce->state);
 
 	context_size = round_up(intel_lr_context_size(engine), 4096);
 
@@ -2520,9 +2520,9 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 		goto error_ringbuf;
 	}
 
-	ctx->engine[engine->id].ringbuf = ringbuf;
-	ctx->engine[engine->id].state = ctx_obj;
-	ctx->engine[engine->id].initialised = engine->init_context == NULL;
+	ce->ringbuf = ringbuf;
+	ce->state = ctx_obj;
+	ce->initialised = engine->init_context == NULL;
 
 	return 0;
 
@@ -2530,8 +2530,8 @@
 	intel_ringbuffer_free(ringbuf);
 error_deref_obj:
 	drm_gem_object_unreference(&ctx_obj->base);
-	ctx->engine[engine->id].ringbuf = NULL;
-	ctx->engine[engine->id].state = NULL;
+	ce->ringbuf = NULL;
+	ce->state = NULL;
 
 	return ret;
 }
@@ -2541,10 +2541,8 @@ void intel_lr_context_reset(struct drm_i915_private *dev_priv,
 	struct intel_engine_cs *engine;
 
 	for_each_engine(engine, dev_priv) {
-		struct drm_i915_gem_object *ctx_obj =
-				ctx->engine[engine->id].state;
-		struct intel_ringbuffer *ringbuf =
-				ctx->engine[engine->id].ringbuf;
+		struct intel_context *ce = &ctx->engine[engine->id];
+		struct drm_i915_gem_object *ctx_obj = ce->state;
 		void *vaddr;
 		uint32_t *reg_state;
 
@@ -2563,7 +2561,7 @@ void intel_lr_context_reset(struct drm_i915_private *dev_priv,
 
 		i915_gem_object_unpin_map(ctx_obj);
 
-		ringbuf->head = 0;
-		ringbuf->tail = 0;
+		ce->ringbuf->head = 0;
+		ce->ringbuf->tail = 0;
 	}
 }
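Closing annotation (not patch content): the pin/unpin pair keeps its refcounted contract after the rewrite. Both calls run under struct_mutex, only the 0 -> 1 pin transition maps ce->state and fills ce->lrc_vma/lrc_desc/lrc_reg_state, and only the final unpin tears that down again and drops the context reference. A hypothetical caller, sketched under those assumptions (use_context() is not code from this patch, and intel_lr_context_pin() is static to intel_lrc.c):

static int use_context(struct i915_gem_context *ctx,
		       struct intel_engine_cs *engine)
{
	int ret;

	/* Both calls assume struct_mutex is held, per lockdep_assert_held(). */
	ret = intel_lr_context_pin(ctx, engine);	/* 0 -> 1: map state, cache lrc_desc */
	if (ret)
		return ret;

	/* ... submit work that references ce->lrc_desc ... */

	intel_lr_context_unpin(ctx, engine);		/* 1 -> 0: unmap, unreference ctx */
	return 0;
}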