diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 311225249563bbc81899dd2b1b444c0bd722b9c1..eb8753fb107ae6b31c9909feb65477a9993e6978 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -354,7 +354,7 @@ static int per_file_ctx_stats(int id, void *ptr, void *data)
 
 	for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
 		if (ctx->engine[n].state)
-			per_file_stats(0, ctx->engine[n].state, data);
+			per_file_stats(0, ctx->engine[n].state->obj, data);
 		if (ctx->engine[n].ring)
 			per_file_stats(0, ctx->engine[n].ring->obj, data);
 	}
@@ -1977,7 +1977,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
 			seq_printf(m, "%s: ", engine->name);
 			seq_putc(m, ce->initialised ? 'I' : 'i');
 			if (ce->state)
-				describe_obj(m, ce->state);
+				describe_obj(m, ce->state->obj);
 			if (ce->ring)
 				describe_ctx_ring(m, ce->ring);
 			seq_putc(m, '\n');
@@ -1995,36 +1995,34 @@ static void i915_dump_lrc_obj(struct seq_file *m,
 			      struct i915_gem_context *ctx,
 			      struct intel_engine_cs *engine)
 {
-	struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
+	struct i915_vma *vma = ctx->engine[engine->id].state;
 	struct page *page;
-	uint32_t *reg_state;
 	int j;
-	unsigned long ggtt_offset = 0;
 
 	seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);
-	if (ctx_obj == NULL) {
-		seq_puts(m, "\tNot allocated\n");
+	if (!vma) {
+		seq_puts(m, "\tFake context\n");
 		return;
 	}
 
-	if (!i915_gem_obj_ggtt_bound(ctx_obj))
-		seq_puts(m, "\tNot bound in GGTT\n");
-	else
-		ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj);
+	if (vma->flags & I915_VMA_GLOBAL_BIND)
+		seq_printf(m, "\tBound in GGTT at 0x%08x\n",
+			   lower_32_bits(vma->node.start));
 
-	if (i915_gem_object_get_pages(ctx_obj)) {
-		seq_puts(m, "\tFailed to get pages for context object\n");
+	if (i915_gem_object_get_pages(vma->obj)) {
+		seq_puts(m, "\tFailed to get pages for context object\n\n");
 		return;
 	}
 
-	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
-	if (!WARN_ON(page == NULL)) {
-		reg_state = kmap_atomic(page);
+	page = i915_gem_object_get_page(vma->obj, LRC_STATE_PN);
+	if (page) {
+		u32 *reg_state = kmap_atomic(page);
 
 		for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
-			seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
-				   ggtt_offset + 4096 + (j * 4),
+			seq_printf(m,
+				   "\t[0x%04x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
+				   j * 4,
 				   reg_state[j], reg_state[j + 1],
 				   reg_state[j + 2], reg_state[j + 3]);
 		}
 		kunmap_atomic(reg_state);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 3285c8e2c87aaf439a414702ef16487f183e4e2f..259425d99e177486953d6c55555adf9a714beaec 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -893,9 +893,8 @@ struct i915_gem_context {
 	u32 ggtt_alignment;
 
 	struct intel_context {
-		struct drm_i915_gem_object *state;
+		struct i915_vma *state;
 		struct intel_ring *ring;
-		struct i915_vma *lrc_vma;
 		uint32_t *lrc_reg_state;
 		u64 lrc_desc;
 		int pin_count;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 547caf26a6b9cc6d3da7bee62e95b54772213241..3857ce097c844a13a9b48f9ed507fb7b72f9385b 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -155,7 +155,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
 		if (ce->ring)
 			intel_ring_free(ce->ring);
 
-		i915_gem_object_put(ce->state);
+		i915_vma_put(ce->state);
 	}
 
 	list_del(&ctx->link);
@@ -281,13 +281,24 @@ __create_hw_context(struct drm_device *dev,
 	ctx->ggtt_alignment = get_context_alignment(dev_priv);
 
 	if (dev_priv->hw_context_size) {
-		struct drm_i915_gem_object *obj =
-				i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
+		struct drm_i915_gem_object *obj;
+		struct i915_vma *vma;
+
+		obj = i915_gem_alloc_context_obj(dev,
+						 dev_priv->hw_context_size);
 		if (IS_ERR(obj)) {
 			ret = PTR_ERR(obj);
 			goto err_out;
 		}
-		ctx->engine[RCS].state = obj;
+
+		vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
+		if (IS_ERR(vma)) {
+			i915_gem_object_put(obj);
+			ret = PTR_ERR(vma);
+			goto err_out;
+		}
+
+		ctx->engine[RCS].state = vma;
 	}
 
 	/* Default context will never have a file_priv */
@@ -399,7 +410,7 @@ static void i915_gem_context_unpin(struct i915_gem_context *ctx,
 	struct intel_context *ce = &ctx->engine[engine->id];
 
 	if (ce->state)
-		i915_gem_object_ggtt_unpin(ce->state);
+		i915_vma_unpin(ce->state);
 
 	i915_gem_context_put(ctx);
 }
@@ -620,9 +631,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_emit(ring, MI_SET_CONTEXT);
-	intel_ring_emit(ring,
-			i915_gem_obj_ggtt_offset(req->ctx->engine[RCS].state) |
-			flags);
+	intel_ring_emit(ring, req->ctx->engine[RCS].state->node.start | flags);
 	/*
 	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
 	 * WaMiSetContext_Hang:snb,ivb,vlv
 	 */
@@ -755,6 +764,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 	struct i915_gem_context *to = req->ctx;
 	struct intel_engine_cs *engine = req->engine;
 	struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
+	struct i915_vma *vma = to->engine[RCS].state;
 	struct i915_gem_context *from;
 	u32 hw_flags;
 	int ret, i;
@@ -763,8 +773,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 		return 0;
 
 	/* Trying to pin first makes error handling easier. */
-	ret = i915_gem_object_ggtt_pin(to->engine[RCS].state, NULL, 0,
-				       to->ggtt_alignment, 0);
+	ret = i915_vma_pin(vma, 0, to->ggtt_alignment, PIN_GLOBAL);
 	if (ret)
 		return ret;
 
@@ -785,9 +794,9 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 	 *
 	 * XXX: We need a real interface to do this instead of trickery.
 	 */
-	ret = i915_gem_object_set_to_gtt_domain(to->engine[RCS].state, false);
+	ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
 	if (ret)
-		goto unpin_out;
+		goto err;
 
 	if (needs_pd_load_pre(ppgtt, engine, to)) {
 		/* Older GENs and non render rings still want the load first,
@@ -797,7 +806,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 		trace_switch_mm(engine, to);
 		ret = ppgtt->switch_mm(ppgtt, req);
 		if (ret)
-			goto unpin_out;
+			goto err;
 	}
 
 	if (!to->engine[RCS].initialised || i915_gem_context_is_default(to))
@@ -814,7 +823,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 	if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
 		ret = mi_set_context(req, hw_flags);
 		if (ret)
-			goto unpin_out;
+			goto err;
 	}
 
 	/* The backing object for the context is done after switching to the
@@ -824,8 +833,6 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 	 * MI_SET_CONTEXT instead of when the next seqno has completed.
 	 */
 	if (from != NULL) {
-		struct drm_i915_gem_object *obj = from->engine[RCS].state;
-
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 		 * whole damn pipeline, we don't need to explicitly mark the
 		 * object dirty. The only exception is that the context must be
@@ -833,11 +840,9 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 		 * able to defer doing this until we know the object would be
 		 * swapped, but there is no way to do that yet.
 		 */
-		obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		i915_vma_move_to_active(i915_gem_obj_to_ggtt(obj), req, 0);
-
-		/* obj is kept alive until the next request by its active ref */
-		i915_gem_object_ggtt_unpin(obj);
+		i915_vma_move_to_active(from->engine[RCS].state, req, 0);
+		/* state is kept alive until the next request */
+		i915_vma_unpin(from->engine[RCS].state);
 		i915_gem_context_put(from);
 	}
 	engine->last_context = i915_gem_context_get(to);
@@ -882,8 +887,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 	}
 
 	return 0;
 
-unpin_out:
-	i915_gem_object_ggtt_unpin(to->engine[RCS].state);
+err:
+	i915_vma_unpin(vma);
 	return ret;
 }
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 9ba71dd4a312a972116d12d47cc16d61d80ae355..61708faebf793f88b336db48abcb47cfde49af58 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1103,9 +1103,10 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
 				i915_error_ggtt_object_create(dev_priv,
 							      engine->scratch.obj);
 
-		ee->ctx =
-			i915_error_ggtt_object_create(dev_priv,
-						      request->ctx->engine[i].state);
+		if (request->ctx->engine[i].state) {
+			ee->ctx = i915_error_ggtt_object_create(dev_priv,
+								request->ctx->engine[i].state->obj);
+		}
 
 		if (request->pid) {
 			struct task_struct *task;
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 29de8cec1b58b2afda58f20ac266165965137406..4f0f173f9754dc99e6350c1b40a2e3ff12f51470 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -358,7 +358,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
 		lrc->context_desc = lower_32_bits(ce->lrc_desc);
 
 		/* The state page is after PPHWSP */
-		gfx_addr = i915_gem_obj_ggtt_offset(ce->state);
+		gfx_addr = ce->state->node.start;
 		lrc->ring_lcra = gfx_addr + LRC_STATE_PN * PAGE_SIZE;
 		lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
 				(guc_engine_id << GUC_ELC_ENGINE_OFFSET);
@@ -1080,7 +1080,7 @@ int intel_guc_suspend(struct drm_device *dev)
 	/* any value greater than GUC_POWER_D0 */
 	data[1] = GUC_POWER_D1;
 	/* first page is shared data with GuC */
-	data[2] = i915_gem_obj_ggtt_offset(ctx->engine[RCS].state);
+	data[2] = ctx->engine[RCS].state->node.start;
 
 	return host2guc_action(guc, data, ARRAY_SIZE(data));
 }
@@ -1105,7 +1105,7 @@ int intel_guc_resume(struct drm_device *dev)
 	data[0] = HOST2GUC_ACTION_EXIT_S_STATE;
 	data[1] = GUC_POWER_D0;
 	/* first page is shared data with GuC */
-	data[2] = i915_gem_obj_ggtt_offset(ctx->engine[RCS].state);
+	data[2] = ctx->engine[RCS].state->node.start;
 
 	return host2guc_action(guc, data, ARRAY_SIZE(data));
 }
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index c24ac39d51f6c5b273d002b92728a6df162dc566..5538e5c541fe1fecb91cfbf7b19cf3aec5cbafca 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -315,7 +315,7 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
 
 	desc = ctx->desc_template;			/* bits  3-4  */
 	desc |= engine->ctx_desc_template;		/* bits  0-11 */
-	desc |= ce->lrc_vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
+	desc |= ce->state->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
 							/* bits 12-31 */
 	desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT;	/* bits 32-52 */
@@ -763,7 +763,6 @@ void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
 static int intel_lr_context_pin(struct i915_gem_context *ctx,
 				struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = ctx->i915;
 	struct intel_context *ce = &ctx->engine[engine->id];
 	void *vaddr;
 	u32 *lrc_reg_state;
@@ -774,16 +773,15 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
 	if (ce->pin_count++)
 		return 0;
 
-	ret = i915_gem_object_ggtt_pin(ce->state, NULL,
-				       0, GEN8_LR_CONTEXT_ALIGN,
-				       PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+	ret = i915_vma_pin(ce->state, 0, GEN8_LR_CONTEXT_ALIGN,
+			   PIN_OFFSET_BIAS | GUC_WOPCM_TOP | PIN_GLOBAL);
 	if (ret)
 		goto err;
 
-	vaddr = i915_gem_object_pin_map(ce->state, I915_MAP_WB);
+	vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
 	if (IS_ERR(vaddr)) {
 		ret = PTR_ERR(vaddr);
-		goto unpin_ctx_obj;
+		goto unpin_vma;
 	}
 
 	lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
@@ -792,24 +790,25 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
 	if (ret)
 		goto unpin_map;
 
-	ce->lrc_vma = i915_gem_obj_to_ggtt(ce->state);
 	intel_lr_context_descriptor_update(ctx, engine);
 
 	lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ring->vma->node.start;
 	ce->lrc_reg_state = lrc_reg_state;
-	ce->state->dirty = true;
+	ce->state->obj->dirty = true;
 
 	/* Invalidate GuC TLB. */
-	if (i915.enable_guc_submission)
+	if (i915.enable_guc_submission) {
+		struct drm_i915_private *dev_priv = ctx->i915;
 		I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
+	}
 
 	i915_gem_context_get(ctx);
 	return 0;
 
 unpin_map:
-	i915_gem_object_unpin_map(ce->state);
-unpin_ctx_obj:
-	i915_gem_object_ggtt_unpin(ce->state);
+	i915_gem_object_unpin_map(ce->state->obj);
+unpin_vma:
+	__i915_vma_unpin(ce->state);
 err:
 	ce->pin_count = 0;
 	return ret;
@@ -828,12 +827,8 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
 
 	intel_ring_unpin(ce->ring);
 
-	i915_gem_object_unpin_map(ce->state);
-	i915_gem_object_ggtt_unpin(ce->state);
-
-	ce->lrc_vma = NULL;
-	ce->lrc_desc = 0;
-	ce->lrc_reg_state = NULL;
+	i915_gem_object_unpin_map(ce->state->obj);
+	i915_vma_unpin(ce->state);
 
 	i915_gem_context_put(ctx);
 }
@@ -1747,19 +1742,18 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
 }
 
 static int
-lrc_setup_hws(struct intel_engine_cs *engine,
-	      struct drm_i915_gem_object *dctx_obj)
+lrc_setup_hws(struct intel_engine_cs *engine, struct i915_vma *vma)
 {
 	void *hws;
 
 	/* The HWSP is part of the default context object in LRC mode. */
-	engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj) +
-				       LRC_PPHWSP_PN * PAGE_SIZE;
-	hws = i915_gem_object_pin_map(dctx_obj, I915_MAP_WB);
+	engine->status_page.gfx_addr =
+		vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
+	hws = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
 	if (IS_ERR(hws))
 		return PTR_ERR(hws);
 	engine->status_page.page_addr = hws + LRC_PPHWSP_PN * PAGE_SIZE;
-	engine->status_page.obj = dctx_obj;
+	engine->status_page.obj = vma->obj;
 
 	return 0;
 }
@@ -2131,6 +2125,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 {
 	struct drm_i915_gem_object *ctx_obj;
 	struct intel_context *ce = &ctx->engine[engine->id];
+	struct i915_vma *vma;
 	uint32_t context_size;
 	struct intel_ring *ring;
 	int ret;
@@ -2148,6 +2143,12 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 		return PTR_ERR(ctx_obj);
 	}
 
+	vma = i915_vma_create(ctx_obj, &ctx->i915->ggtt.base, NULL);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		goto error_deref_obj;
+	}
+
 	ring = intel_engine_create_ring(engine, ctx->ring_size);
 	if (IS_ERR(ring)) {
 		ret = PTR_ERR(ring);
@@ -2161,7 +2162,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 	}
 
 	ce->ring = ring;
-	ce->state = ctx_obj;
+	ce->state = vma;
 	ce->initialised = engine->init_context == NULL;
 
 	return 0;
@@ -2170,8 +2171,6 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 error_ring_free:
 	intel_ring_free(ring);
 error_deref_obj:
 	i915_gem_object_put(ctx_obj);
-	ce->ring = NULL;
-	ce->state = NULL;
 	return ret;
 }
@@ -2182,24 +2181,23 @@ void intel_lr_context_reset(struct drm_i915_private *dev_priv,
 
 	for_each_engine(engine, dev_priv) {
 		struct intel_context *ce = &ctx->engine[engine->id];
-		struct drm_i915_gem_object *ctx_obj = ce->state;
 		void *vaddr;
 		uint32_t *reg_state;
 
-		if (!ctx_obj)
+		if (!ce->state)
 			continue;
 
-		vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
+		vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
 		if (WARN_ON(IS_ERR(vaddr)))
 			continue;
 
 		reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
-		ctx_obj->dirty = true;
 
 		reg_state[CTX_RING_HEAD+1] = 0;
 		reg_state[CTX_RING_TAIL+1] = 0;
 
-		i915_gem_object_unpin_map(ctx_obj);
+		ce->state->obj->dirty = true;
+		i915_gem_object_unpin_map(ce->state->obj);
 
 		ce->ring->head = 0;
 		ce->ring->tail = 0;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 75a9f635c3dc5d2dfe13db4b8a1fcc96998c0253..2318a27341c8dbde2cdb94a2f30f08d735b1630f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2092,8 +2092,8 @@ static int intel_ring_context_pin(struct i915_gem_context *ctx,
 		return 0;
 
 	if (ce->state) {
-		ret = i915_gem_object_ggtt_pin(ce->state, NULL, 0,
-					       ctx->ggtt_alignment, PIN_HIGH);
+		ret = i915_vma_pin(ce->state, 0, ctx->ggtt_alignment,
+				   PIN_GLOBAL | PIN_HIGH);
 		if (ret)
 			goto error;
 	}
@@ -2127,7 +2127,7 @@ static void intel_ring_context_unpin(struct i915_gem_context *ctx,
 		return;
 
 	if (ce->state)
-		i915_gem_object_ggtt_unpin(ce->state);
+		i915_vma_unpin(ce->state);
 
 	i915_gem_context_put(ctx);
 }