Commit 0766e2ef authored by Zhenyu Wang

Merge tag 'drm-intel-next-2018-06-06' into gvt-next

Backmerge for recent request->hw_context change and
new vGPU huge page capability definition.
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
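Note: the i915 rework pulled in by this backmerge splits the old request->ctx pointer into rq->gem_context (the client's GEM context) and rq->hw_context (the pinned per-engine logical state), which is why the gvt hunks below stop digging through shadow_ctx->__engine[ring_id]. A simplified sketch of the shape change (stand-in structs for orientation, not the real definitions):

/* Simplified stand-ins, for orientation only. */
struct intel_context {
	struct i915_gem_context *gem_context;	/* back-pointer, new in this merge */
	struct i915_vma *state;			/* pinned logical ring context */
};

struct i915_request {
	struct i915_gem_context *gem_context;	/* was: rq->ctx */
	struct intel_context *hw_context;	/* new: pinned HW state */
};

/* old: obj = shadow_ctx->__engine[ring_id].state->obj;
 * new: obj = rq->hw_context->state->obj;
 */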
@@ -61,7 +61,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
 	}

 	mutex_lock(&dev_priv->drm.struct_mutex);
-	ret = i915_gem_gtt_insert(&dev_priv->ggtt.base, node,
+	ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node,
				  size, I915_GTT_PAGE_SIZE,
				  I915_COLOR_UNEVICTABLE,
				  start, end, flags);
...
@@ -273,8 +273,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 	for_each_pipe(dev_priv, pipe) {
 		vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISPLAY_PLANE_ENABLE;
 		vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE;
-		vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~CURSOR_MODE;
-		vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= CURSOR_MODE_DISABLE;
+		vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~MCURSOR_MODE;
+		vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= MCURSOR_MODE_DISABLE;
 	}

 	vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
...
@@ -301,16 +301,16 @@ static int cursor_mode_to_drm(int mode)
 	int cursor_pixel_formats_index = 4;

 	switch (mode) {
-	case CURSOR_MODE_128_ARGB_AX:
+	case MCURSOR_MODE_128_ARGB_AX:
 		cursor_pixel_formats_index = 0;
 		break;
-	case CURSOR_MODE_256_ARGB_AX:
+	case MCURSOR_MODE_256_ARGB_AX:
 		cursor_pixel_formats_index = 1;
 		break;
-	case CURSOR_MODE_64_ARGB_AX:
+	case MCURSOR_MODE_64_ARGB_AX:
 		cursor_pixel_formats_index = 2;
 		break;
-	case CURSOR_MODE_64_32B_AX:
+	case MCURSOR_MODE_64_32B_AX:
 		cursor_pixel_formats_index = 3;
 		break;
@@ -343,8 +343,8 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
 		return -ENODEV;

 	val = vgpu_vreg_t(vgpu, CURCNTR(pipe));
-	mode = val & CURSOR_MODE;
-	plane->enabled = (mode != CURSOR_MODE_DISABLE);
+	mode = val & MCURSOR_MODE;
+	plane->enabled = (mode != MCURSOR_MODE_DISABLE);
 	if (!plane->enabled)
 		return -ENODEV;
...
@@ -377,9 +377,9 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
 #define gvt_aperture_sz(gvt)		(gvt->dev_priv->ggtt.mappable_end)
 #define gvt_aperture_pa_base(gvt)	(gvt->dev_priv->ggtt.gmadr.start)

-#define gvt_ggtt_gm_sz(gvt)	(gvt->dev_priv->ggtt.base.total)
+#define gvt_ggtt_gm_sz(gvt)	(gvt->dev_priv->ggtt.vm.total)
 #define gvt_ggtt_sz(gvt) \
-	((gvt->dev_priv->ggtt.base.total >> PAGE_SHIFT) << 3)
+	((gvt->dev_priv->ggtt.vm.total >> PAGE_SHIFT) << 3)
 #define gvt_hidden_sz(gvt)	(gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))

 #define gvt_aperture_gmadr_base(gvt) (0)
...
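A worked example of what gvt_ggtt_sz() above evaluates to: one 8-byte PTE per 4KiB GGTT page. The 4GiB total below is an assumed figure for illustration, not taken from the diff.

#include <stdio.h>

int main(void)
{
	unsigned long long ggtt_total = 4ULL << 30;	/* assumed ggtt.vm.total */
	unsigned int page_shift = 12;			/* 4KiB pages */

	/* gvt_ggtt_sz(): number of pages times 8 bytes per PTE */
	unsigned long long pte_bytes = (ggtt_total >> page_shift) << 3;

	printf("GGTT PTE table size: %llu MiB\n", pte_bytes >> 20);
	return 0;
}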
@@ -446,9 +446,9 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,

 #define CTX_CONTEXT_CONTROL_VAL	0x03

-bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id)
+bool is_inhibit_context(struct intel_context *ce)
 {
-	u32 *reg_state = ctx->__engine[ring_id].lrc_reg_state;
+	const u32 *reg_state = ce->lrc_reg_state;
 	u32 inhibit_mask =
 		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
@@ -501,7 +501,7 @@ static void switch_mmio(struct intel_vgpu *pre,
 		 * itself.
 		 */
 		if (mmio->in_context &&
-		    !is_inhibit_context(s->shadow_ctx, ring_id))
+		    !is_inhibit_context(&s->shadow_ctx->__engine[ring_id]))
 			continue;

 		if (mmio->mask)
...
@@ -49,7 +49,7 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,

 void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt);

-bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id);
+bool is_inhibit_context(struct intel_context *ce);

 int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
 				       struct i915_request *req);
...
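For orientation, is_inhibit_context() tests a masked-bit field: on these parts the upper 16 bits of a context-control write select which of the lower 16 bits take effect, so the check needs both halves set. A minimal userspace rendition (the macro mirrors the i915 one; the bit position and register value here are fabricated for the demo):

#include <stdint.h>
#include <stdio.h>

#define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))
#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT	(1 << 2)	/* illustrative bit */

int main(void)
{
	uint32_t inhibit_mask =
		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
	uint32_t reg_state = inhibit_mask;	/* fabricated CTX_CONTEXT_CONTROL value */

	/* same test as is_inhibit_context(): value bit AND its mask bit */
	printf("inhibit? %s\n",
	       (reg_state & inhibit_mask) == inhibit_mask ? "yes" : "no");
	return 0;
}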
@@ -54,11 +54,8 @@ static void set_context_pdp_root_pointer(

 static void update_shadow_pdps(struct intel_vgpu_workload *workload)
 {
-	struct intel_vgpu *vgpu = workload->vgpu;
-	int ring_id = workload->ring_id;
-	struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
 	struct drm_i915_gem_object *ctx_obj =
-		shadow_ctx->__engine[ring_id].state->obj;
+		workload->req->hw_context->state->obj;
 	struct execlist_ring_context *shadow_ring_context;
 	struct page *page;
@@ -128,9 +125,8 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct intel_gvt *gvt = vgpu->gvt;
 	int ring_id = workload->ring_id;
-	struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
 	struct drm_i915_gem_object *ctx_obj =
-		shadow_ctx->__engine[ring_id].state->obj;
+		workload->req->hw_context->state->obj;
 	struct execlist_ring_context *shadow_ring_context;
 	struct page *page;
 	void *dst;
@@ -205,7 +201,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)

 static inline bool is_gvt_request(struct i915_request *req)
 {
-	return i915_gem_context_force_single_submission(req->ctx);
+	return i915_gem_context_force_single_submission(req->gem_context);
 }

 static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
@@ -280,10 +276,8 @@ static int shadow_context_status_change(struct notifier_block *nb,
 	return NOTIFY_OK;
 }

-static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
-		struct intel_engine_cs *engine)
+static void shadow_context_descriptor_update(struct intel_context *ce)
 {
-	struct intel_context *ce = to_intel_context(ctx, engine);
 	u64 desc = 0;

 	desc = ce->lrc_desc;
@@ -292,7 +286,7 @@ static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
 	 * like GEN8_CTX_* cached in desc_template
 	 */
 	desc &= U64_MAX << 12;
-	desc |= ctx->desc_template & ((1ULL << 12) - 1);
+	desc |= ce->gem_context->desc_template & ((1ULL << 12) - 1);

 	ce->lrc_desc = desc;
 }
@@ -300,12 +294,11 @@ static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
 static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
-	struct i915_request *req = workload->req;
 	void *shadow_ring_buffer_va;
 	u32 *cs;
+	struct i915_request *req = workload->req;

-	if (IS_KABYLAKE(req->i915) &&
-	    is_inhibit_context(req->ctx, req->engine->id))
+	if (IS_KABYLAKE(req->i915) && is_inhibit_context(req->hw_context))
 		intel_vgpu_restore_inhibit_context(vgpu, req);

 	/* allocate shadow ring buffer */
@@ -353,92 +346,67 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	struct intel_vgpu_submission *s = &vgpu->submission;
 	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	int ring_id = workload->ring_id;
-	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
-	struct intel_ring *ring;
+	struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
+	struct intel_context *ce;
+	struct i915_request *rq;
 	int ret;

 	lockdep_assert_held(&dev_priv->drm.struct_mutex);

-	if (workload->shadowed)
+	if (workload->req)
 		return 0;

+	/* pin shadow context by gvt even the shadow context will be pinned
+	 * when i915 alloc request. That is because gvt will update the guest
+	 * context from shadow context when workload is completed, and at that
+	 * moment, i915 may already unpined the shadow context to make the
+	 * shadow_ctx pages invalid. So gvt need to pin itself. After update
+	 * the guest context, gvt can unpin the shadow_ctx safely.
+	 */
+	ce = intel_context_pin(shadow_ctx, engine);
+	if (IS_ERR(ce)) {
+		gvt_vgpu_err("fail to pin shadow context\n");
+		return PTR_ERR(ce);
+	}
+
 	shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
 	shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
 				    GEN8_CTX_ADDRESSING_MODE_SHIFT;

-	if (!test_and_set_bit(ring_id, s->shadow_ctx_desc_updated))
-		shadow_context_descriptor_update(shadow_ctx,
-						 dev_priv->engine[ring_id]);
+	if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated))
+		shadow_context_descriptor_update(ce);

 	ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
 	if (ret)
-		goto err_scan;
+		goto err_unpin;

 	if ((workload->ring_id == RCS) &&
 	    (workload->wa_ctx.indirect_ctx.size != 0)) {
 		ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
 		if (ret)
-			goto err_scan;
-	}
-
-	/* pin shadow context by gvt even the shadow context will be pinned
-	 * when i915 alloc request. That is because gvt will update the guest
-	 * context from shadow context when workload is completed, and at that
-	 * moment, i915 may already unpined the shadow context to make the
-	 * shadow_ctx pages invalid. So gvt need to pin itself. After update
-	 * the guest context, gvt can unpin the shadow_ctx safely.
-	 */
-	ring = intel_context_pin(shadow_ctx, engine);
-	if (IS_ERR(ring)) {
-		ret = PTR_ERR(ring);
-		gvt_vgpu_err("fail to pin shadow context\n");
+			goto err_shadow;
+	}
+
+	rq = i915_request_alloc(engine, shadow_ctx);
+	if (IS_ERR(rq)) {
+		gvt_vgpu_err("fail to allocate gem request\n");
+		ret = PTR_ERR(rq);
 		goto err_shadow;
 	}
+	workload->req = i915_request_get(rq);

 	ret = populate_shadow_context(workload);
 	if (ret)
-		goto err_unpin;
-	workload->shadowed = true;
+		goto err_req;
+
 	return 0;

-err_unpin:
-	intel_context_unpin(shadow_ctx, engine);
+err_req:
+	rq = fetch_and_zero(&workload->req);
+	i915_request_put(rq);
 err_shadow:
 	release_shadow_wa_ctx(&workload->wa_ctx);
-err_scan:
+err_unpin:
+	intel_context_unpin(ce);
 	return ret;
 }
-
-static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
-{
-	int ring_id = workload->ring_id;
-	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
-	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
-	struct i915_request *rq;
-	struct intel_vgpu *vgpu = workload->vgpu;
-	struct intel_vgpu_submission *s = &vgpu->submission;
-	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
-	int ret;
-
-	rq = i915_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
-	if (IS_ERR(rq)) {
-		gvt_vgpu_err("fail to allocate gem request\n");
-		ret = PTR_ERR(rq);
-		goto err_unpin;
-	}
-
-	gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
-
-	workload->req = i915_request_get(rq);
-	ret = copy_workload_to_ring_buffer(workload);
-	if (ret)
-		goto err_unpin;
-	return 0;
-
-err_unpin:
-	intel_context_unpin(shadow_ctx, engine);
-	release_shadow_wa_ctx(&workload->wa_ctx);
-	return ret;
-}
@@ -517,21 +485,13 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 	return ret;
 }

-static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
-	struct intel_vgpu_workload *workload = container_of(wa_ctx,
-					struct intel_vgpu_workload,
-					wa_ctx);
-	int ring_id = workload->ring_id;
-	struct intel_vgpu_submission *s = &workload->vgpu->submission;
-	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
-	struct drm_i915_gem_object *ctx_obj =
-		shadow_ctx->__engine[ring_id].state->obj;
-	struct execlist_ring_context *shadow_ring_context;
-	struct page *page;
-
-	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
-	shadow_ring_context = kmap_atomic(page);
+	struct intel_vgpu_workload *workload =
+		container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
+	struct i915_request *rq = workload->req;
+	struct execlist_ring_context *shadow_ring_context =
+		(struct execlist_ring_context *)rq->hw_context->lrc_reg_state;

 	shadow_ring_context->bb_per_ctx_ptr.val =
 		(shadow_ring_context->bb_per_ctx_ptr.val &
@@ -539,9 +499,6 @@ static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	shadow_ring_context->rcs_indirect_ctx.val =
 		(shadow_ring_context->rcs_indirect_ctx.val &
 		(~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
-
-	kunmap_atomic(shadow_ring_context);
-	return 0;
 }
@@ -633,7 +590,7 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
 		goto err_unpin_mm;
 	}

-	ret = intel_gvt_generate_request(workload);
+	ret = copy_workload_to_ring_buffer(workload);
 	if (ret) {
 		gvt_vgpu_err("fail to generate request\n");
 		goto err_unpin_mm;
@@ -670,12 +627,9 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
 static int dispatch_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
-	struct intel_vgpu_submission *s = &vgpu->submission;
-	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	int ring_id = workload->ring_id;
-	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
-	int ret = 0;
+	int ret;

 	gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
 		ring_id, workload);
@@ -688,10 +642,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 		goto out;

 	ret = prepare_workload(workload);
-	if (ret) {
-		intel_context_unpin(shadow_ctx, engine);
-		goto out;
-	}

 out:
 	if (ret)
@@ -767,27 +717,23 @@ static struct intel_vgpu_workload *pick_next_workload(

 static void update_guest_context(struct intel_vgpu_workload *workload)
 {
+	struct i915_request *rq = workload->req;
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct intel_gvt *gvt = vgpu->gvt;
-	struct intel_vgpu_submission *s = &vgpu->submission;
-	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
-	int ring_id = workload->ring_id;
-	struct drm_i915_gem_object *ctx_obj =
-		shadow_ctx->__engine[ring_id].state->obj;
+	struct drm_i915_gem_object *ctx_obj = rq->hw_context->state->obj;
 	struct execlist_ring_context *shadow_ring_context;
 	struct page *page;
 	void *src;
 	unsigned long context_gpa, context_page_num;
 	int i;

-	gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
+	gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
 		workload->ctx_desc.lrca);

-	context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
+	context_page_num = rq->engine->context_size;
 	context_page_num = context_page_num >> PAGE_SHIFT;

-	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
+	if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS)
 		context_page_num = 19;

 	i = 2;
@@ -860,6 +806,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 		scheduler->current_workload[ring_id];
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct i915_request *rq = workload->req;
 	int event;

 	mutex_lock(&vgpu->vgpu_lock);
@@ -869,10 +816,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 	 * switch to make sure request is completed.
 	 * For the workload w/o request, directly complete the workload.
 	 */
-	if (workload->req) {
-		struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-		struct intel_engine_cs *engine =
-			dev_priv->engine[workload->ring_id];
+	if (rq) {
 		wait_event(workload->shadow_ctx_status_wq,
 			   !atomic_read(&workload->shadow_ctx_active));
@@ -888,8 +832,6 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 				workload->status = 0;
 		}

-		i915_request_put(fetch_and_zero(&workload->req));
-
 		if (!workload->status && !(vgpu->resetting_eng &
 					   ENGINE_MASK(ring_id))) {
 			update_guest_context(workload);
@@ -898,10 +840,13 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 				INTEL_GVT_EVENT_MAX)
 				intel_vgpu_trigger_virtual_event(vgpu, event);
 		}
-		mutex_lock(&dev_priv->drm.struct_mutex);
+
 		/* unpin shadow ctx as the shadow_ctx update is done */
-		intel_context_unpin(s->shadow_ctx, engine);
-		mutex_unlock(&dev_priv->drm.struct_mutex);
+		mutex_lock(&rq->i915->drm.struct_mutex);
+		intel_context_unpin(rq->hw_context);
+		mutex_unlock(&rq->i915->drm.struct_mutex);
+
+		i915_request_put(fetch_and_zero(&workload->req));
 	}

 	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -1271,7 +1216,6 @@ alloc_workload(struct intel_vgpu *vgpu)
 	atomic_set(&workload->shadow_ctx_active, 0);

 	workload->status = -EINPROGRESS;
-	workload->shadowed = false;
 	workload->vgpu = vgpu;

 	return workload;
...
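The new err_req unwind above takes ownership of workload->req with fetch_and_zero() before dropping the reference. A userspace rendition of that helper (the macro below mirrors the kernel's i915 utility and needs GNU-C statement expressions; main() is only a demonstration):

#include <stdio.h>

/* GNU-C statement expression, as in the kernel's i915_utils.h */
#define fetch_and_zero(ptr) ({			\
	typeof(*(ptr)) __T = *(ptr);		\
	*(ptr) = (typeof(*(ptr)))0;		\
	__T;					\
})

int main(void)
{
	const char *req = "pending request";
	const char *old = fetch_and_zero(&req);

	/* old now holds the only reference; the slot has been NULLed */
	printf("took %s, slot=%p\n", old, (const void *)req);
	return 0;
}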
@@ -83,7 +83,6 @@ struct intel_vgpu_workload {
 	struct i915_request *req;
 	/* if this workload has been dispatched to i915? */
 	bool dispatched;
-	bool shadowed;
 	int status;

 	struct intel_vgpu_mm *shadow_mm;
...
@@ -328,7 +328,7 @@ static int per_file_stats(int id, void *ptr, void *data)
 		} else {
 			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);

-			if (ppgtt->base.file != stats->file_priv)
+			if (ppgtt->vm.file != stats->file_priv)
 				continue;
 		}
@@ -508,7 +508,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 		   dpy_count, dpy_size);

 	seq_printf(m, "%llu [%pa] gtt total\n",
-		   ggtt->base.total, &ggtt->mappable_end);
+		   ggtt->vm.total, &ggtt->mappable_end);
 	seq_printf(m, "Supported page sizes: %s\n",
 		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
 					buf, sizeof(buf)));
@@ -542,8 +542,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 					   struct i915_request,
 					   client_link);
 		rcu_read_lock();
-		task = pid_task(request && request->ctx->pid ?
-				request->ctx->pid : file->pid,
+		task = pid_task(request && request->gem_context->pid ?
+				request->gem_context->pid : file->pid,
 				PIDTYPE_PID);
 		print_file_stats(m, task ? task->comm : "<unknown>", stats);
 		rcu_read_unlock();
@@ -1162,19 +1162,28 @@ static int i915_frequency_info(struct seq_file *m, void *unused)

 		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

-		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
-			pm_ier = I915_READ(GEN6_PMIER);
-			pm_imr = I915_READ(GEN6_PMIMR);
-			pm_isr = I915_READ(GEN6_PMISR);
-			pm_iir = I915_READ(GEN6_PMIIR);
-			pm_mask = I915_READ(GEN6_PMINTRMSK);
-		} else {
+		if (INTEL_GEN(dev_priv) >= 11) {
+			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
+			pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
+			/*
+			 * The equivalent to the PM ISR & IIR cannot be read
+			 * without affecting the current state of the system
+			 */
+			pm_isr = 0;
+			pm_iir = 0;
+		} else if (INTEL_GEN(dev_priv) >= 8) {
 			pm_ier = I915_READ(GEN8_GT_IER(2));
 			pm_imr = I915_READ(GEN8_GT_IMR(2));
 			pm_isr = I915_READ(GEN8_GT_ISR(2));
 			pm_iir = I915_READ(GEN8_GT_IIR(2));
-			pm_mask = I915_READ(GEN6_PMINTRMSK);
+		} else {
+			pm_ier = I915_READ(GEN6_PMIER);
+			pm_imr = I915_READ(GEN6_PMIMR);
+			pm_isr = I915_READ(GEN6_PMISR);
+			pm_iir = I915_READ(GEN6_PMIIR);
 		}
+		pm_mask = I915_READ(GEN6_PMINTRMSK);
+
 		seq_printf(m, "Video Turbo Mode: %s\n",
 			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
 		seq_printf(m, "HW control enabled: %s\n",
@@ -1182,8 +1191,12 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		seq_printf(m, "SW control enabled: %s\n",
 			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
 				  GEN6_RP_MEDIA_SW_MODE));
-		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
-			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
+
+		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
+			   pm_ier, pm_imr, pm_mask);
+		if (INTEL_GEN(dev_priv) <= 10)
+			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
+				   pm_isr, pm_iir);
 		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
 			   rps->pm_intrmsk_mbz);
 		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
@@ -1895,7 +1908,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 			   fbdev_fb->base.format->cpp[0] * 8,
 			   fbdev_fb->base.modifier,
 			   drm_framebuffer_read_refcount(&fbdev_fb->base));
-		describe_obj(m, fbdev_fb->obj);
+		describe_obj(m, intel_fb_obj(&fbdev_fb->base));
 		seq_putc(m, '\n');
 	}
 #endif
@@ -1913,7 +1926,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 			   fb->base.format->cpp[0] * 8,
 			   fb->base.modifier,
 			   drm_framebuffer_read_refcount(&fb->base));
-		describe_obj(m, fb->obj);
+		describe_obj(m, intel_fb_obj(&fb->base));
 		seq_putc(m, '\n');
 	}

 	mutex_unlock(&dev->mode_config.fb_lock);
@@ -2630,8 +2643,6 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	u32 psrperf = 0;
-	u32 stat[3];
-	enum pipe pipe;
 	bool enabled = false;
 	bool sink_support;
@@ -2652,47 +2663,17 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 	seq_printf(m, "Re-enable work scheduled: %s\n",
 		   yesno(work_busy(&dev_priv->psr.work.work)));

-	if (HAS_DDI(dev_priv)) {
-		if (dev_priv->psr.psr2_enabled)
-			enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
-		else
-			enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
-	} else {
-		for_each_pipe(dev_priv, pipe) {
-			enum transcoder cpu_transcoder =
-				intel_pipe_to_cpu_transcoder(dev_priv, pipe);
-			enum intel_display_power_domain power_domain;
-
-			power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
-			if (!intel_display_power_get_if_enabled(dev_priv,
-								power_domain))
-				continue;
-
-			stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
-				VLV_EDP_PSR_CURR_STATE_MASK;
-			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
-			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
-				enabled = true;
-
-			intel_display_power_put(dev_priv, power_domain);
-		}
-	}
+	if (dev_priv->psr.psr2_enabled)
+		enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
+	else
+		enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;

 	seq_printf(m, "Main link in standby mode: %s\n",
 		   yesno(dev_priv->psr.link_standby));

-	seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
-
-	if (!HAS_DDI(dev_priv))
-		for_each_pipe(dev_priv, pipe) {
-			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
-			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
-				seq_printf(m, " pipe %c", pipe_name(pipe));
-		}
-	seq_puts(m, "\n");
+	seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));

 	/*
-	 * VLV/CHV PSR has no kind of performance counter
 	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
 	 */
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -4245,8 +4226,13 @@ i915_drop_caches_set(void *data, u64 val)
 		i915_gem_shrink_all(dev_priv);
 	fs_reclaim_release(GFP_KERNEL);

-	if (val & DROP_IDLE)
-		drain_delayed_work(&dev_priv->gt.idle_work);
+	if (val & DROP_IDLE) {
+		do {
+			if (READ_ONCE(dev_priv->gt.active_requests))
+				flush_delayed_work(&dev_priv->gt.retire_work);
+			drain_delayed_work(&dev_priv->gt.idle_work);
+		} while (READ_ONCE(dev_priv->gt.awake));
+	}

 	if (val & DROP_FREED)
 		i915_gem_drain_freed_objects(dev_priv);
...
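Why DROP_IDLE became a loop: retiring requests can queue more work, so a single drain may observe the device still awake. A toy userspace model of the converge-until-asleep pattern (all names here are stand-ins, not the i915 API):

#include <stdbool.h>
#include <stdio.h>

static int active_requests = 3;	/* pretend work is outstanding */
static bool awake = true;

static void flush_retire_work(void) { if (active_requests) active_requests--; }
static void drain_idle_work(void)  { if (!active_requests) awake = false; }

int main(void)
{
	do {
		if (active_requests)
			flush_retire_work();	/* retire first, may uncover more */
		drain_idle_work();		/* then let the idle worker run */
	} while (awake);

	printf("GPU idle\n");
	return 0;
}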
@@ -67,6 +67,7 @@ bool __i915_inject_load_failure(const char *func, int line)
 	if (++i915_load_fail_count == i915_modparams.inject_load_failure) {
 		DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
 			 i915_modparams.inject_load_failure, func, line);
+		i915_modparams.inject_load_failure = 0;
 		return true;
 	}
@@ -117,16 +118,15 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
 static bool i915_error_injected(struct drm_i915_private *dev_priv)
 {
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
-	return i915_modparams.inject_load_failure &&
-	       i915_load_fail_count == i915_modparams.inject_load_failure;
+	return i915_load_fail_count && !i915_modparams.inject_load_failure;
 #else
 	return false;
 #endif
 }

-#define i915_load_error(dev_priv, fmt, ...)				     \
-	__i915_printk(dev_priv,						     \
-		      i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
+#define i915_load_error(i915, fmt, ...)					 \
+	__i915_printk(i915,						 \
+		      i915_error_injected(i915) ? KERN_DEBUG : KERN_ERR, \
 		      fmt, ##__VA_ARGS__)

 /* Map PCH device id to PCH type, or PCH_NONE if unknown. */
@@ -233,6 +233,8 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
 			id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
 		else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv))
 			id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
+		else if (IS_ICELAKE(dev_priv))
+			id = INTEL_PCH_ICP_DEVICE_ID_TYPE;

 	if (id)
 		DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id);
@@ -634,26 +636,6 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
 	.can_switch = i915_switcheroo_can_switch,
 };

-static void i915_gem_fini(struct drm_i915_private *dev_priv)
-{
-	/* Flush any outstanding unpin_work. */
-	i915_gem_drain_workqueue(dev_priv);
-
-	mutex_lock(&dev_priv->drm.struct_mutex);
-	intel_uc_fini_hw(dev_priv);
-	intel_uc_fini(dev_priv);
-	i915_gem_cleanup_engines(dev_priv);
-	i915_gem_contexts_fini(dev_priv);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
-
-	intel_uc_fini_misc(dev_priv);
-	i915_gem_cleanup_userptr(dev_priv);
-
-	i915_gem_drain_freed_objects(dev_priv);
-
-	WARN_ON(!list_empty(&dev_priv->contexts.list));
-}
-
 static int i915_load_modeset_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1553,12 +1535,30 @@ static bool suspend_to_idle(struct drm_i915_private *dev_priv)
 	return false;
 }

+static int i915_drm_prepare(struct drm_device *dev)
+{
+	struct drm_i915_private *i915 = to_i915(dev);
+	int err;
+
+	/*
+	 * NB intel_display_suspend() may issue new requests after we've
+	 * ostensibly marked the GPU as ready-to-sleep here. We need to
+	 * split out that work and pull it forward so that after point,
+	 * the GPU is not woken again.
+	 */
+	err = i915_gem_suspend(i915);
+	if (err)
+		dev_err(&i915->drm.pdev->dev,
+			"GEM idle failed, suspend/resume might fail\n");
+
+	return err;
+}
+
 static int i915_drm_suspend(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct pci_dev *pdev = dev_priv->drm.pdev;
 	pci_power_t opregion_target_state;
-	int error;

 	/* ignore lid events during suspend */
 	mutex_lock(&dev_priv->modeset_restore_lock);
@@ -1575,13 +1575,6 @@ static int i915_drm_suspend(struct drm_device *dev)

 	pci_save_state(pdev);

-	error = i915_gem_suspend(dev_priv);
-	if (error) {
-		dev_err(&pdev->dev,
-			"GEM idle failed, resume might fail\n");
-		goto out;
-	}
-
 	intel_display_suspend(dev);

 	intel_dp_mst_suspend(dev);
@@ -1600,7 +1593,6 @@ static int i915_drm_suspend(struct drm_device *dev)
 	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
 	intel_opregion_notify_adapter(dev_priv, opregion_target_state);

-	intel_uncore_suspend(dev_priv);
 	intel_opregion_unregister(dev_priv);

 	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
@@ -1609,10 +1601,9 @@ static int i915_drm_suspend(struct drm_device *dev)

 	intel_csr_ucode_suspend(dev_priv);

-out:
 	enable_rpm_wakeref_asserts(dev_priv);

-	return error;
+	return 0;
 }

 static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
@@ -1623,7 +1614,10 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)

 	disable_rpm_wakeref_asserts(dev_priv);

+	i915_gem_suspend_late(dev_priv);
+
 	intel_display_set_init_power(dev_priv, false);
+	intel_uncore_suspend(dev_priv);

 	/*
 	 * In case of firmware assisted context save/restore don't manually
@@ -2081,6 +2075,22 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
 	return ret;
 }

+static int i915_pm_prepare(struct device *kdev)
+{
+	struct pci_dev *pdev = to_pci_dev(kdev);
+	struct drm_device *dev = pci_get_drvdata(pdev);
+
+	if (!dev) {
+		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
+		return -ENODEV;
+	}
+
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
+	return i915_drm_prepare(dev);
+}
+
 static int i915_pm_suspend(struct device *kdev)
 {
 	struct pci_dev *pdev = to_pci_dev(kdev);
@@ -2731,6 +2741,7 @@ const struct dev_pm_ops i915_pm_ops = {
 	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
 	 * PMSG_RESUME]
 	 */
+	.prepare = i915_pm_prepare,
 	.suspend = i915_pm_suspend,
 	.suspend_late = i915_pm_suspend_late,
 	.resume_early = i915_pm_resume_early,
...
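The net effect of the i915_drm_prepare() hunks above: GEM quiescing moves from .suspend into the earlier .prepare phase, so intel_display_suspend() can no longer wake the GPU after it was declared idle. A toy model of the resulting callback order (stand-in types; the real sequencing is driven by the kernel PM core):

#include <stdio.h>

struct dev_pm_ops_model {
	int (*prepare)(void);
	int (*suspend)(void);
	int (*suspend_late)(void);
};

static int prepare(void)      { printf("prepare: quiesce GEM first\n"); return 0; }
static int suspend(void)      { printf("suspend: display + opregion\n"); return 0; }
static int suspend_late(void) { printf("suspend_late: gem late + uncore\n"); return 0; }

int main(void)
{
	struct dev_pm_ops_model pm = { prepare, suspend, suspend_late };

	/* the PM core invokes the phases in exactly this order */
	pm.prepare();
	pm.suspend();
	pm.suspend_late();
	return 0;
}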
@@ -85,8 +85,8 @@

 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20180413"
-#define DRIVER_TIMESTAMP	1523611258
+#define DRIVER_DATE		"20180606"
+#define DRIVER_TIMESTAMP	1528323047

 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
  * WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -607,7 +607,6 @@ struct i915_psr {
 	bool link_standby;
 	bool colorimetry_support;
 	bool alpm;
-	bool has_hw_tracking;
 	bool psr2_enabled;
 	u8 sink_sync_latency;
 	bool debug;
@@ -1049,9 +1048,9 @@ struct intel_vbt_data {
 	/* Feature bits */
 	unsigned int int_tv_support:1;
 	unsigned int lvds_dither:1;
-	unsigned int lvds_vbt:1;
 	unsigned int int_crt_support:1;
 	unsigned int lvds_use_ssc:1;
+	unsigned int int_lvds_support:1;
 	unsigned int display_clock_mode:1;
 	unsigned int fdi_rx_polarity_inverted:1;
 	unsigned int panel_type:4;
@@ -1067,7 +1066,6 @@ struct intel_vbt_data {
 		int vswing;
 		bool low_vswing;
 		bool initialized;
-		bool support;
 		int bpp;
 		struct edp_power_seq pps;
 	} edp;
@@ -1078,8 +1076,8 @@ struct intel_vbt_data {
 		bool require_aux_wakeup;
 		int idle_frames;
 		enum psr_lines_to_wait lines_to_wait;
-		int tp1_wakeup_time;
-		int tp2_tp3_wakeup_time;
+		int tp1_wakeup_time_us;
+		int tp2_tp3_wakeup_time_us;
 	} psr;

 	struct {
@@ -1843,6 +1841,7 @@ struct drm_i915_private {
 		 */
 		struct ida hw_ida;
 #define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
+#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
 #define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
 	} contexts;
@@ -1950,7 +1949,9 @@ struct drm_i915_private {
 		 */
 		struct i915_perf_stream *exclusive_stream;

+		struct intel_context *pinned_ctx;
 		u32 specific_ctx_id;
+		u32 specific_ctx_id_mask;

 		struct hrtimer poll_check_timer;
 		wait_queue_head_t poll_wq;
@@ -2743,6 +2744,8 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
 int intel_engines_init_mmio(struct drm_i915_private *dev_priv);
 int intel_engines_init(struct drm_i915_private *dev_priv);

+u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv);
+
 /* intel_hotplug.c */
 void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
 			   u32 pin_mask, u32 long_mask);
@@ -3164,10 +3167,12 @@ void i915_gem_init_mmio(struct drm_i915_private *i915);
 int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
 int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv);
 void i915_gem_init_swizzling(struct drm_i915_private *dev_priv);
+void i915_gem_fini(struct drm_i915_private *dev_priv);
 void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv);
 int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
 			   unsigned int flags);
 int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv);
+void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
 void i915_gem_resume(struct drm_i915_private *dev_priv);
 int i915_gem_fault(struct vm_fault *vmf);
 int i915_gem_object_wait(struct drm_i915_gem_object *obj,
@@ -3208,7 +3213,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 static inline struct i915_hw_ppgtt *
 i915_vm_to_ppgtt(struct i915_address_space *vm)
 {
-	return container_of(vm, struct i915_hw_ppgtt, base);
+	return container_of(vm, struct i915_hw_ppgtt, vm);
 }

 /* i915_gem_fence_reg.c */
...
(This diff is collapsed.)
@@ -26,6 +26,7 @@
 #define __I915_GEM_H__

 #include <linux/bug.h>
+#include <linux/interrupt.h>

 struct drm_i915_private;
@@ -62,9 +63,12 @@ struct drm_i915_private;
 #if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM)
 #define GEM_TRACE(...) trace_printk(__VA_ARGS__)
 #define GEM_TRACE_DUMP() ftrace_dump(DUMP_ALL)
+#define GEM_TRACE_DUMP_ON(expr) \
+	do { if (expr) ftrace_dump(DUMP_ALL); } while (0)
 #else
 #define GEM_TRACE(...) do { } while (0)
 #define GEM_TRACE_DUMP() do { } while (0)
+#define GEM_TRACE_DUMP_ON(expr) BUILD_BUG_ON_INVALID(expr)
 #endif

 #define I915_NUM_ENGINES 8
@@ -72,4 +76,16 @@ struct drm_i915_private;
 void i915_gem_park(struct drm_i915_private *i915);
 void i915_gem_unpark(struct drm_i915_private *i915);

+static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
+{
+	if (atomic_inc_return(&t->count) == 1)
+		tasklet_unlock_wait(t);
+}
+
+static inline void __tasklet_enable_sync_once(struct tasklet_struct *t)
+{
+	if (atomic_dec_return(&t->count) == 0)
+		tasklet_kill(t);
+}
+
 #endif /* __I915_GEM_H__ */
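Usage sketch for the new once-helpers above: disables nest, only the first disable waits for a running tasklet, and only the final enable kills a concurrently re-queued one (hypothetical caller; kernel context assumed, so this is not compiled standalone):

#include <linux/interrupt.h>

static void example_pause_and_resume(struct tasklet_struct *t)
{
	__tasklet_disable_sync_once(t);	/* count 0 -> 1: waits for a running tasklet */
	__tasklet_disable_sync_once(t);	/* count 1 -> 2: no extra wait */

	/* ... safe to touch state the tasklet also touches ... */

	__tasklet_enable_sync_once(t);	/* count 2 -> 1: still disabled */
	__tasklet_enable_sync_once(t);	/* count 1 -> 0: tasklet_kill() */
}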
@@ -127,14 +127,8 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
 	for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
 		struct intel_context *ce = &ctx->__engine[n];

-		if (!ce->state)
-			continue;
-
-		WARN_ON(ce->pin_count);
-		if (ce->ring)
-			intel_ring_free(ce->ring);
-
-		__i915_gem_object_release_unless_active(ce->state->obj);
+		if (ce->ops)
+			ce->ops->destroy(ce);
 	}

 	kfree(ctx->name);
@@ -203,7 +197,7 @@ static void context_close(struct i915_gem_context *ctx)
 	 */
 	lut_close(ctx);
 	if (ctx->ppgtt)
-		i915_ppgtt_close(&ctx->ppgtt->base);
+		i915_ppgtt_close(&ctx->ppgtt->vm);

 	ctx->file_priv = ERR_PTR(-EBADF);
 	i915_gem_context_put(ctx);
@@ -214,10 +208,19 @@ static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
 	int ret;
 	unsigned int max;

-	if (INTEL_GEN(dev_priv) >= 11)
+	if (INTEL_GEN(dev_priv) >= 11) {
 		max = GEN11_MAX_CONTEXT_HW_ID;
-	else
-		max = MAX_CONTEXT_HW_ID;
+	} else {
+		/*
+		 * When using GuC in proxy submission, GuC consumes the
+		 * highest bit in the context id to indicate proxy submission.
+		 */
+		if (USES_GUC_SUBMISSION(dev_priv))
+			max = MAX_GUC_CONTEXT_HW_ID;
+		else
+			max = MAX_CONTEXT_HW_ID;
+	}

 	ret = ida_simple_get(&dev_priv->contexts.hw_ida,
 			     0, max, GFP_KERNEL);
@@ -246,7 +249,7 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
 	desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;

 	address_mode = INTEL_LEGACY_32B_CONTEXT;
-	if (ppgtt && i915_vm_is_48bit(&ppgtt->base))
+	if (ppgtt && i915_vm_is_48bit(&ppgtt->vm))
 		address_mode = INTEL_LEGACY_64B_CONTEXT;
 	desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
@@ -266,6 +269,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
 		    struct drm_i915_file_private *file_priv)
 {
 	struct i915_gem_context *ctx;
+	unsigned int n;
 	int ret;

 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -283,6 +287,12 @@ __create_hw_context(struct drm_i915_private *dev_priv,
 	ctx->i915 = dev_priv;
 	ctx->sched.priority = I915_PRIORITY_NORMAL;

+	for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
+		struct intel_context *ce = &ctx->__engine[n];
+
+		ce->gem_context = ctx;
+	}
+
 	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
 	INIT_LIST_HEAD(&ctx->handles_list);
@@ -514,16 +524,8 @@ void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)

 	lockdep_assert_held(&dev_priv->drm.struct_mutex);

-	for_each_engine(engine, dev_priv, id) {
-		engine->legacy_active_context = NULL;
-		engine->legacy_active_ppgtt = NULL;
-
-		if (!engine->last_retired_context)
-			continue;
-
-		intel_context_unpin(engine->last_retired_context, engine);
-		engine->last_retired_context = NULL;
-	}
+	for_each_engine(engine, dev_priv, id)
+		intel_engine_lost_context(engine);
 }

 void i915_gem_contexts_fini(struct drm_i915_private *i915)
@@ -583,58 +585,119 @@ last_request_on_engine(struct i915_timeline *timeline,
 {
 	struct i915_request *rq;

-	if (timeline == &engine->timeline)
-		return NULL;
+	GEM_BUG_ON(timeline == &engine->timeline);

 	rq = i915_gem_active_raw(&timeline->last_request,
 				 &engine->i915->drm.struct_mutex);
-	if (rq && rq->engine == engine)
+	if (rq && rq->engine == engine) {
+		GEM_TRACE("last request for %s on engine %s: %llx:%d\n",
+			  timeline->name, engine->name,
+			  rq->fence.context, rq->fence.seqno);
+		GEM_BUG_ON(rq->timeline != timeline);
 		return rq;
+	}

 	return NULL;
 }

-static bool engine_has_idle_kernel_context(struct intel_engine_cs *engine)
+static bool engine_has_kernel_context_barrier(struct intel_engine_cs *engine)
 {
-	struct i915_timeline *timeline;
+	struct drm_i915_private *i915 = engine->i915;
+	const struct intel_context * const ce =
+		to_intel_context(i915->kernel_context, engine);
+	struct i915_timeline *barrier = ce->ring->timeline;
+	struct intel_ring *ring;
+	bool any_active = false;

-	list_for_each_entry(timeline, &engine->i915->gt.timelines, link) {
-		if (last_request_on_engine(timeline, engine))
+	lockdep_assert_held(&i915->drm.struct_mutex);
+	list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
+		struct i915_request *rq;
+
+		rq = last_request_on_engine(ring->timeline, engine);
+		if (!rq)
+			continue;
+
+		any_active = true;
+
+		if (rq->hw_context == ce)
+			continue;
+
+		/*
+		 * Was this request submitted after the previous
+		 * switch-to-kernel-context?
+		 */
+		if (!i915_timeline_sync_is_later(barrier, &rq->fence)) {
+			GEM_TRACE("%s needs barrier for %llx:%d\n",
+				  ring->timeline->name,
+				  rq->fence.context,
+				  rq->fence.seqno);
 			return false;
+		}
+
+		GEM_TRACE("%s has barrier after %llx:%d\n",
+			  ring->timeline->name,
+			  rq->fence.context,
+			  rq->fence.seqno);
 	}

-	return intel_engine_has_kernel_context(engine);
+	/*
+	 * If any other timeline was still active and behind the last barrier,
+	 * then our last switch-to-kernel-context must still be queued and
+	 * will run last (leaving the engine in the kernel context when it
+	 * eventually idles).
+	 */
+	if (any_active)
+		return true;
+
+	/* The engine is idle; check that it is idling in the kernel context. */
+	return engine->last_retired_context == ce;
 }

-int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
+int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
 {
 	struct intel_engine_cs *engine;
-	struct i915_timeline *timeline;
 	enum intel_engine_id id;

-	lockdep_assert_held(&dev_priv->drm.struct_mutex);
+	GEM_TRACE("awake?=%s\n", yesno(i915->gt.awake));

-	i915_retire_requests(dev_priv);
+	lockdep_assert_held(&i915->drm.struct_mutex);
+	GEM_BUG_ON(!i915->kernel_context);

-	for_each_engine(engine, dev_priv, id) {
+	i915_retire_requests(i915);
+
+	for_each_engine(engine, i915, id) {
+		struct intel_ring *ring;
 		struct i915_request *rq;

-		if (engine_has_idle_kernel_context(engine))
+		GEM_BUG_ON(!to_intel_context(i915->kernel_context, engine));
+		if (engine_has_kernel_context_barrier(engine))
 			continue;

-		rq = i915_request_alloc(engine, dev_priv->kernel_context);
+		GEM_TRACE("emit barrier on %s\n", engine->name);
+
+		rq = i915_request_alloc(engine, i915->kernel_context);
 		if (IS_ERR(rq))
 			return PTR_ERR(rq);

 		/* Queue this switch after all other activity */
-		list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
+		list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
 			struct i915_request *prev;

-			prev = last_request_on_engine(timeline, engine);
-			if (prev)
-				i915_sw_fence_await_sw_fence_gfp(&rq->submit,
-								 &prev->submit,
-								 I915_FENCE_GFP);
+			prev = last_request_on_engine(ring->timeline, engine);
+			if (!prev)
+				continue;
+
+			if (prev->gem_context == i915->kernel_context)
+				continue;
+
+			GEM_TRACE("add barrier on %s for %llx:%d\n",
+				  engine->name,
+				  prev->fence.context,
+				  prev->fence.seqno);
+			i915_sw_fence_await_sw_fence_gfp(&rq->submit,
+							 &prev->submit,
+							 I915_FENCE_GFP);
+			i915_timeline_sync_set(rq->timeline, &prev->fence);
 		}

 		/*
@@ -747,11 +810,11 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
 		break;
 	case I915_CONTEXT_PARAM_GTT_SIZE:
 		if (ctx->ppgtt)
-			args->value = ctx->ppgtt->base.total;
+			args->value = ctx->ppgtt->vm.total;
 		else if (to_i915(dev)->mm.aliasing_ppgtt)
-			args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
+			args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
 		else
-			args->value = to_i915(dev)->ggtt.base.total;
+			args->value = to_i915(dev)->ggtt.vm.total;
 		break;
 	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
 		args->value = i915_gem_context_no_error_capture(ctx);
...
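Arithmetic behind the MAX_GUC_CONTEXT_HW_ID cap used in assign_hw_id() above: reserving the top bit of the 21-bit id space for GuC proxy submission halves the usable range. The limits come from the diff; the program is just a demonstration.

#include <stdio.h>

int main(void)
{
	unsigned int max_hw_id = 1u << 21;	/* MAX_CONTEXT_HW_ID */
	unsigned int max_guc_id = 1u << 20;	/* MAX_GUC_CONTEXT_HW_ID */

	printf("ids reserved for the GuC proxy bit: %u\n",
	       max_hw_id - max_guc_id);
	return 0;
}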
...@@ -30,6 +30,7 @@
 #include <linux/radix-tree.h>
 
 #include "i915_gem.h"
+#include "i915_scheduler.h"
 
 struct pid;
...@@ -45,6 +46,13 @@ struct intel_ring;
 
 #define DEFAULT_CONTEXT_HANDLE 0
 
+struct intel_context;
+
+struct intel_context_ops {
+	void (*unpin)(struct intel_context *ce);
+	void (*destroy)(struct intel_context *ce);
+};
+
 /**
  * struct i915_gem_context - client state
  *
...@@ -144,11 +152,14 @@ struct i915_gem_context {
 	/** engine: per-engine logical HW state */
 	struct intel_context {
+		struct i915_gem_context *gem_context;
 		struct i915_vma *state;
 		struct intel_ring *ring;
 		u32 *lrc_reg_state;
 		u64 lrc_desc;
 		int pin_count;
+
+		const struct intel_context_ops *ops;
 	} __engine[I915_NUM_ENGINES];
 
 	/** ring_size: size for allocating the per-engine ring buffer */
...@@ -263,25 +274,26 @@ to_intel_context(struct i915_gem_context *ctx,
 	return &ctx->__engine[engine->id];
 }
 
-static inline struct intel_ring *
+static inline struct intel_context *
 intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
 {
 	return engine->context_pin(engine, ctx);
 }
 
-static inline void __intel_context_pin(struct i915_gem_context *ctx,
-				       const struct intel_engine_cs *engine)
+static inline void __intel_context_pin(struct intel_context *ce)
 {
-	struct intel_context *ce = to_intel_context(ctx, engine);
-
 	GEM_BUG_ON(!ce->pin_count);
 	ce->pin_count++;
 }
 
-static inline void intel_context_unpin(struct i915_gem_context *ctx,
-				       struct intel_engine_cs *engine)
+static inline void intel_context_unpin(struct intel_context *ce)
 {
-	engine->context_unpin(engine, ctx);
+	GEM_BUG_ON(!ce->pin_count);
+	if (--ce->pin_count)
+		return;
+
+	GEM_BUG_ON(!ce->ops);
+	ce->ops->unpin(ce);
 }
 
 /* i915_gem_context.c */
...
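
Aside: an illustrative sketch (not driver code) of the pin_count + ops shape introduced above. Only the last unpin dispatches through the vtable, so each submission backend can supply its own teardown:

#include <assert.h>

struct ctx;

struct ctx_ops {
	void (*unpin)(struct ctx *c);
};

struct ctx {
	int pin_count;
	const struct ctx_ops *ops;
};

static void ctx_unpin(struct ctx *c)
{
	assert(c->pin_count);
	if (--c->pin_count)
		return;		/* still pinned by another user */
	c->ops->unpin(c);	/* last reference: backend-specific release */
}
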
...@@ -703,7 +703,7 @@ static int eb_select_context(struct i915_execbuffer *eb)
 		return -ENOENT;
 
 	eb->ctx = ctx;
-	eb->vm = ctx->ppgtt ? &ctx->ppgtt->base : &eb->i915->ggtt.base;
+	eb->vm = ctx->ppgtt ? &ctx->ppgtt->vm : &eb->i915->ggtt.vm;
 
 	eb->context_flags = 0;
 	if (ctx->flags & CONTEXT_NO_ZEROMAP)
...@@ -943,9 +943,9 @@ static void reloc_cache_reset(struct reloc_cache *cache)
 		if (cache->node.allocated) {
 			struct i915_ggtt *ggtt = cache_to_ggtt(cache);
 
-			ggtt->base.clear_range(&ggtt->base,
-					       cache->node.start,
-					       cache->node.size);
+			ggtt->vm.clear_range(&ggtt->vm,
+					     cache->node.start,
+					     cache->node.size);
 			drm_mm_remove_node(&cache->node);
 		} else {
 			i915_vma_unpin((struct i915_vma *)cache->node.mm);
...@@ -1016,7 +1016,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 		if (IS_ERR(vma)) {
 			memset(&cache->node, 0, sizeof(cache->node));
 			err = drm_mm_insert_node_in_range
-				(&ggtt->base.mm, &cache->node,
+				(&ggtt->vm.mm, &cache->node,
 				 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
 				 0, ggtt->mappable_end,
 				 DRM_MM_INSERT_LOW);
...@@ -1037,9 +1037,9 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 	offset = cache->node.start;
 	if (cache->node.allocated) {
 		wmb();
-		ggtt->base.insert_page(&ggtt->base,
-				       i915_gem_object_get_dma_address(obj, page),
-				       offset, I915_CACHE_NONE, 0);
+		ggtt->vm.insert_page(&ggtt->vm,
+				     i915_gem_object_get_dma_address(obj, page),
+				     offset, I915_CACHE_NONE, 0);
 	} else {
 		offset += page << PAGE_SHIFT;
 	}
...
This diff has been collapsed.
...@@ -65,7 +65,7 @@ typedef u64 gen8_pde_t;
 typedef u64 gen8_ppgtt_pdpe_t;
 typedef u64 gen8_ppgtt_pml4e_t;
 
-#define ggtt_total_entries(ggtt) ((ggtt)->base.total >> PAGE_SHIFT)
+#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
 
 /* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
 #define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
...@@ -367,7 +367,7 @@ i915_vm_has_scratch_64K(struct i915_address_space *vm)
  * the spec.
  */
 struct i915_ggtt {
-	struct i915_address_space base;
+	struct i915_address_space vm;
 
 	struct io_mapping iomap;	/* Mapping to our CPU mappable region */
 	struct resource gmadr;		/* GMADR resource */
...@@ -385,7 +385,7 @@ struct i915_ggtt {
 };
 
 struct i915_hw_ppgtt {
-	struct i915_address_space base;
+	struct i915_address_space vm;
 	struct kref ref;
 	struct drm_mm_node node;
 	unsigned long pd_dirty_rings;
...@@ -543,7 +543,7 @@ static inline struct i915_ggtt *
 i915_vm_to_ggtt(struct i915_address_space *vm)
 {
 	GEM_BUG_ON(!i915_is_ggtt(vm));
-	return container_of(vm, struct i915_ggtt, base);
+	return container_of(vm, struct i915_ggtt, vm);
 }
 
 #define INTEL_MAX_PPAT_ENTRIES 8
...
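
Aside: an illustrative sketch (not driver code) of the container_of() upcast that i915_vm_to_ggtt() relies on after the base -> vm rename. Because the address space is embedded in the GGTT struct, subtracting the member offset recovers the enclosing struct:

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct address_space { unsigned long total; };

struct ggtt {
	struct address_space vm;	/* embedded base, as above */
	void *iomap;
};

static struct ggtt *to_ggtt(struct address_space *vm)
{
	return container_of(vm, struct ggtt, vm);
}
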
...@@ -194,7 +194,7 @@ int i915_gem_render_state_emit(struct i915_request *rq)
 	if (IS_ERR(so.obj))
 		return PTR_ERR(so.obj);
 
-	so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.base, NULL);
+	so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.vm, NULL);
 	if (IS_ERR(so.vma)) {
 		err = PTR_ERR(so.vma);
 		goto err_obj;
...
...@@ -480,7 +480,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
 	/* We also want to clear any cached iomaps as they wrap vmap */
 	list_for_each_entry_safe(vma, next,
-				 &i915->ggtt.base.inactive_list, vm_link) {
+				 &i915->ggtt.vm.inactive_list, vm_link) {
 		unsigned long count = vma->node.size >> PAGE_SHIFT;
 
 		if (vma->iomap && i915_vma_unbind(vma) == 0)
 			freed_pages += count;
...
...@@ -642,7 +642,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
 	if (ret)
 		goto err;
 
-	vma = i915_vma_instance(obj, &ggtt->base, NULL);
+	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
 		goto err_pages;
...@@ -653,7 +653,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
 	 * setting up the GTT space. The actual reservation will occur
 	 * later.
 	 */
-	ret = i915_gem_gtt_reserve(&ggtt->base, &vma->node,
+	ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
 				   size, gtt_offset, obj->cache_level,
 				   0);
 	if (ret) {
...@@ -666,7 +666,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
 	vma->pages = obj->mm.pages;
 	vma->flags |= I915_VMA_GLOBAL_BIND;
 	__i915_vma_set_map_and_fenceable(vma);
-	list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
+	list_move_tail(&vma->vm_link, &ggtt->vm.inactive_list);
 
 	spin_lock(&dev_priv->mm.obj_lock);
 	list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
...
...@@ -973,8 +973,7 @@ i915_error_object_create(struct drm_i915_private *i915,
 		void __iomem *s;
 		int ret;
 
-		ggtt->base.insert_page(&ggtt->base, dma, slot,
-				       I915_CACHE_NONE, 0);
+		ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
 
 		s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
 		ret = compress_page(&compress, (void  __force *)s, dst);
...@@ -993,7 +992,7 @@ i915_error_object_create(struct drm_i915_private *i915,
 
 out:
 	compress_fini(&compress, dst);
-	ggtt->base.clear_range(&ggtt->base, slot, PAGE_SIZE);
+	ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
 	return dst;
 }
 
...@@ -1287,9 +1286,11 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
 static void record_request(struct i915_request *request,
 			   struct drm_i915_error_request *erq)
 {
-	erq->context = request->ctx->hw_id;
+	struct i915_gem_context *ctx = request->gem_context;
+
+	erq->context = ctx->hw_id;
 	erq->sched_attr = request->sched.attr;
-	erq->ban_score = atomic_read(&request->ctx->ban_score);
+	erq->ban_score = atomic_read(&ctx->ban_score);
 	erq->seqno = request->global_seqno;
 	erq->jiffies = request->emitted_jiffies;
 	erq->start = i915_ggtt_offset(request->ring->vma);
...@@ -1297,7 +1298,7 @@ static void record_request(struct i915_request *request,
 	erq->tail = request->tail;
 
 	rcu_read_lock();
-	erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0;
+	erq->pid = ctx->pid ? pid_nr(ctx->pid) : 0;
 	rcu_read_unlock();
 }
 
...@@ -1461,12 +1462,12 @@ static void gem_record_rings(struct i915_gpu_state *error)
 		request = i915_gem_find_active_request(engine);
 		if (request) {
+			struct i915_gem_context *ctx = request->gem_context;
 			struct intel_ring *ring;
 
-			ee->vm = request->ctx->ppgtt ?
-				&request->ctx->ppgtt->base : &ggtt->base;
+			ee->vm = ctx->ppgtt ? &ctx->ppgtt->vm : &ggtt->vm;
 
-			record_context(&ee->context, request->ctx);
+			record_context(&ee->context, ctx);
 
 			/* We need to copy these to an anonymous buffer
 			 * as the simplest method to avoid being overwritten
...@@ -1483,11 +1484,10 @@ static void gem_record_rings(struct i915_gpu_state *error)
 			ee->ctx =
 				i915_error_object_create(i915,
-							 to_intel_context(request->ctx,
-									  engine)->state);
+							 request->hw_context->state);
 
 			error->simulated |=
-				i915_gem_context_no_error_capture(request->ctx);
+				i915_gem_context_no_error_capture(ctx);
 
 			ee->rq_head = request->head;
 			ee->rq_post = request->postfix;
...@@ -1563,17 +1563,17 @@ static void capture_active_buffers(struct i915_gpu_state *error)
 
 static void capture_pinned_buffers(struct i915_gpu_state *error)
 {
-	struct i915_address_space *vm = &error->i915->ggtt.base;
+	struct i915_address_space *vm = &error->i915->ggtt.vm;
 	struct drm_i915_error_buffer *bo;
 	struct i915_vma *vma;
 	int count_inactive, count_active;
 
 	count_inactive = 0;
-	list_for_each_entry(vma, &vm->active_list, vm_link)
+	list_for_each_entry(vma, &vm->inactive_list, vm_link)
 		count_inactive++;
 
 	count_active = 0;
-	list_for_each_entry(vma, &vm->inactive_list, vm_link)
+	list_for_each_entry(vma, &vm->active_list, vm_link)
 		count_active++;
 
 	bo = NULL;
...@@ -1667,7 +1667,16 @@ static void capture_reg_state(struct i915_gpu_state *error)
 	}
 
 	/* 4: Everything else */
-	if (INTEL_GEN(dev_priv) >= 8) {
+	if (INTEL_GEN(dev_priv) >= 11) {
+		error->ier = I915_READ(GEN8_DE_MISC_IER);
+		error->gtier[0] = I915_READ(GEN11_RENDER_COPY_INTR_ENABLE);
+		error->gtier[1] = I915_READ(GEN11_VCS_VECS_INTR_ENABLE);
+		error->gtier[2] = I915_READ(GEN11_GUC_SG_INTR_ENABLE);
+		error->gtier[3] = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
+		error->gtier[4] = I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE);
+		error->gtier[5] = I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE);
+		error->ngtier = 6;
+	} else if (INTEL_GEN(dev_priv) >= 8) {
 		error->ier = I915_READ(GEN8_DE_MISC_IER);
 		for (i = 0; i < 4; i++)
 			error->gtier[i] = I915_READ(GEN8_GT_IER(i));
...
...@@ -58,7 +58,7 @@ struct i915_gpu_state {
 	u32 eir;
 	u32 pgtbl_er;
 	u32 ier;
-	u32 gtier[4], ngtier;
+	u32 gtier[6], ngtier;
 	u32 ccid;
 	u32 derrmr;
 	u32 forcewake;
...
...@@ -2640,7 +2640,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 			    GEN9_AUX_CHANNEL_C |
 			    GEN9_AUX_CHANNEL_D;
 
-		if (IS_CNL_WITH_PORT_F(dev_priv))
+		if (IS_CNL_WITH_PORT_F(dev_priv) ||
+		    INTEL_GEN(dev_priv) >= 11)
 			tmp_mask |= CNL_AUX_CHANNEL_F;
 
 		if (iir & tmp_mask) {
...@@ -3920,7 +3921,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
 		de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
 	}
 
-	if (IS_CNL_WITH_PORT_F(dev_priv))
+	if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
 		de_port_masked |= CNL_AUX_CHANNEL_F;
 
 	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
...
...@@ -130,9 +130,6 @@ i915_param_named_unsafe(invert_brightness, int, 0600,
 i915_param_named(disable_display, bool, 0400,
 	"Disable display (default: false)");
 
-i915_param_named_unsafe(enable_cmd_parser, bool, 0400,
-	"Enable command parsing (true=enabled [default], false=disabled)");
-
 i915_param_named(mmio_debug, int, 0600,
 	"Enable the MMIO debug code for the first N failures (default: off). "
 	"This may negatively affect performance.");
...
...@@ -58,7 +58,6 @@ struct drm_printer;
 	param(unsigned int, inject_load_failure, 0) \
 	/* leave bools at the end to not create holes */ \
 	param(bool, alpha_support, IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT)) \
-	param(bool, enable_cmd_parser, true) \
 	param(bool, enable_hangcheck, true) \
 	param(bool, fastboot, false) \
 	param(bool, prefault_disable, false) \
...
...@@ -340,7 +340,6 @@ static const struct intel_device_info intel_valleyview_info = {
 	GEN(7),
 	.is_lp = 1,
 	.num_pipes = 2,
-	.has_psr = 1,
 	.has_runtime_pm = 1,
 	.has_rc6 = 1,
 	.has_gmch_display = 1,
...@@ -433,7 +432,6 @@ static const struct intel_device_info intel_cherryview_info = {
 	.is_lp = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
 	.has_64bit_reloc = 1,
-	.has_psr = 1,
 	.has_runtime_pm = 1,
 	.has_resource_streamer = 1,
 	.has_rc6 = 1,
...
...@@ -737,12 +737,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
 			continue;
 		}
 
-		/*
-		 * XXX: Just keep the lower 21 bits for now since I'm not
-		 * entirely sure if the HW touches any of the higher bits in
-		 * this field
-		 */
-		ctx_id = report32[2] & 0x1fffff;
+		ctx_id = report32[2] & dev_priv->perf.oa.specific_ctx_id_mask;
 
 		/*
 		 * Squash whatever is in the CTX_ID field if it's marked as
...@@ -1203,6 +1198,33 @@ static int i915_oa_read(struct i915_perf_stream *stream,
 	return dev_priv->perf.oa.ops.read(stream, buf, count, offset);
 }
 
+static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
+					    struct i915_gem_context *ctx)
+{
+	struct intel_engine_cs *engine = i915->engine[RCS];
+	struct intel_context *ce;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(&i915->drm);
+	if (ret)
+		return ERR_PTR(ret);
+
+	/*
+	 * As the ID is the gtt offset of the context's vma we
+	 * pin the vma to ensure the ID remains fixed.
+	 *
+	 * NB: implied RCS engine...
+	 */
+	ce = intel_context_pin(ctx, engine);
+	mutex_unlock(&i915->drm.struct_mutex);
+	if (IS_ERR(ce))
+		return ce;
+
+	i915->perf.oa.pinned_ctx = ce;
+
+	return ce;
+}
+
 /**
  * oa_get_render_ctx_id - determine and hold ctx hw id
  * @stream: An i915-perf stream opened for OA metrics
...@@ -1215,40 +1237,76 @@ static int i915_oa_read(struct i915_perf_stream *stream,
  */
 static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
 {
-	struct drm_i915_private *dev_priv = stream->dev_priv;
-
-	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
-		dev_priv->perf.oa.specific_ctx_id = stream->ctx->hw_id;
-	} else {
-		struct intel_engine_cs *engine = dev_priv->engine[RCS];
-		struct intel_ring *ring;
-		int ret;
-
-		ret = i915_mutex_lock_interruptible(&dev_priv->drm);
-		if (ret)
-			return ret;
-
-		/*
-		 * As the ID is the gtt offset of the context's vma we
-		 * pin the vma to ensure the ID remains fixed.
-		 *
-		 * NB: implied RCS engine...
-		 */
-		ring = intel_context_pin(stream->ctx, engine);
-		mutex_unlock(&dev_priv->drm.struct_mutex);
-		if (IS_ERR(ring))
-			return PTR_ERR(ring);
-
-		/*
-		 * Explicitly track the ID (instead of calling
-		 * i915_ggtt_offset() on the fly) considering the difference
-		 * with gen8+ and execlists
-		 */
-		dev_priv->perf.oa.specific_ctx_id =
-			i915_ggtt_offset(to_intel_context(stream->ctx, engine)->state);
+	struct drm_i915_private *i915 = stream->dev_priv;
+	struct intel_context *ce;
+
+	ce = oa_pin_context(i915, stream->ctx);
+	if (IS_ERR(ce))
+		return PTR_ERR(ce);
+
+	switch (INTEL_GEN(i915)) {
+	case 7: {
+		/*
+		 * On Haswell we don't do any post processing of the reports
+		 * and don't need to use the mask.
+		 */
+		i915->perf.oa.specific_ctx_id = i915_ggtt_offset(ce->state);
+		i915->perf.oa.specific_ctx_id_mask = 0;
+		break;
+	}
+
+	case 8:
+	case 9:
+	case 10:
+		if (USES_GUC_SUBMISSION(i915)) {
+			/*
+			 * When using GuC, the context descriptor we write in
+			 * i915 is read by GuC and rewritten before it's
+			 * actually written into the hardware. The LRCA is
+			 * what is put into the context id field of the
+			 * context descriptor by GuC. Because it's aligned to
+			 * a page, the lower 12bits are always at 0 and
+			 * dropped by GuC. They won't be part of the context
+			 * ID in the OA reports, so squash those lower bits.
+			 */
+			i915->perf.oa.specific_ctx_id =
+				lower_32_bits(ce->lrc_desc) >> 12;
+
+			/*
+			 * GuC uses the top bit to signal proxy submission, so
+			 * ignore that bit.
+			 */
+			i915->perf.oa.specific_ctx_id_mask =
+				(1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
+		} else {
+			i915->perf.oa.specific_ctx_id = stream->ctx->hw_id;
+			i915->perf.oa.specific_ctx_id_mask =
+				(1U << GEN8_CTX_ID_WIDTH) - 1;
+		}
+		break;
+
+	case 11: {
+		struct intel_engine_cs *engine = i915->engine[RCS];
+
+		i915->perf.oa.specific_ctx_id =
+			stream->ctx->hw_id << (GEN11_SW_CTX_ID_SHIFT - 32) |
+			engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32) |
+			engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
+		i915->perf.oa.specific_ctx_id_mask =
+			((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32) |
+			((1U << GEN11_ENGINE_INSTANCE_WIDTH) - 1) << (GEN11_ENGINE_INSTANCE_SHIFT - 32) |
+			((1 << GEN11_ENGINE_CLASS_WIDTH) - 1) << (GEN11_ENGINE_CLASS_SHIFT - 32);
+		break;
+	}
+
+	default:
+		MISSING_CASE(INTEL_GEN(i915));
 	}
 
+	DRM_DEBUG_DRIVER("filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
+			 i915->perf.oa.specific_ctx_id,
+			 i915->perf.oa.specific_ctx_id_mask);
+
 	return 0;
 }
 
...@@ -1262,17 +1320,15 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
 static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
 {
 	struct drm_i915_private *dev_priv = stream->dev_priv;
+	struct intel_context *ce;
 
-	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
-		dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
-	} else {
-		struct intel_engine_cs *engine = dev_priv->engine[RCS];
+	dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
+	dev_priv->perf.oa.specific_ctx_id_mask = 0;
 
+	ce = fetch_and_zero(&dev_priv->perf.oa.pinned_ctx);
+	if (ce) {
 		mutex_lock(&dev_priv->drm.struct_mutex);
-
-		dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
-		intel_context_unpin(stream->ctx, engine);
-
+		intel_context_unpin(ce);
 		mutex_unlock(&dev_priv->drm.struct_mutex);
 	}
 }
...
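
Aside: an illustrative sketch (not driver code) of how the per-gen mask set up above is applied when walking OA reports. The CTX_ID dword is masked before being compared against the pinned context's id, so bits the hardware or GuC repurposes are ignored:

#include <stdint.h>
#include <stdbool.h>

static bool report_matches_ctx(const uint32_t *report32,
			       uint32_t specific_ctx_id,
			       uint32_t specific_ctx_id_mask)
{
	uint32_t ctx_id = report32[2] & specific_ctx_id_mask;

	/* mask both sides so only the valid id bits are compared */
	return ctx_id == (specific_ctx_id & specific_ctx_id_mask);
}
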
...@@ -127,6 +127,7 @@ static void __i915_pmu_maybe_start_timer(struct drm_i915_private *i915)
 {
 	if (!i915->pmu.timer_enabled && pmu_needs_timer(i915, true)) {
 		i915->pmu.timer_enabled = true;
+		i915->pmu.timer_last = ktime_get();
 		hrtimer_start_range_ns(&i915->pmu.timer,
 				       ns_to_ktime(PERIOD), 0,
 				       HRTIMER_MODE_REL_PINNED);
...@@ -155,12 +156,13 @@ static bool grab_forcewake(struct drm_i915_private *i915, bool fw)
 }
 
 static void
-update_sample(struct i915_pmu_sample *sample, u32 unit, u32 val)
+add_sample(struct i915_pmu_sample *sample, u32 val)
 {
-	sample->cur += mul_u32_u32(val, unit);
+	sample->cur += val;
 }
 
-static void engines_sample(struct drm_i915_private *dev_priv)
+static void
+engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
 {
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
...@@ -182,8 +184,9 @@ static void engines_sample(struct drm_i915_private *dev_priv)
 
 		val = !i915_seqno_passed(current_seqno, last_seqno);
 
-		update_sample(&engine->pmu.sample[I915_SAMPLE_BUSY],
-			      PERIOD, val);
+		if (val)
+			add_sample(&engine->pmu.sample[I915_SAMPLE_BUSY],
+				   period_ns);
 
 		if (val && (engine->pmu.enable &
 		    (BIT(I915_SAMPLE_WAIT) | BIT(I915_SAMPLE_SEMA)))) {
...@@ -194,11 +197,13 @@ static void engines_sample(struct drm_i915_private *dev_priv)
 			val = 0;
 		}
 
-		update_sample(&engine->pmu.sample[I915_SAMPLE_WAIT],
-			      PERIOD, !!(val & RING_WAIT));
+		if (val & RING_WAIT)
+			add_sample(&engine->pmu.sample[I915_SAMPLE_WAIT],
+				   period_ns);
 
-		update_sample(&engine->pmu.sample[I915_SAMPLE_SEMA],
-			      PERIOD, !!(val & RING_WAIT_SEMAPHORE));
+		if (val & RING_WAIT_SEMAPHORE)
+			add_sample(&engine->pmu.sample[I915_SAMPLE_SEMA],
+				   period_ns);
 	}
 
 	if (fw)
...@@ -207,7 +212,14 @@ static void engines_sample(struct drm_i915_private *dev_priv)
 	intel_runtime_pm_put(dev_priv);
 }
 
-static void frequency_sample(struct drm_i915_private *dev_priv)
+static void
+add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
+{
+	sample->cur += mul_u32_u32(val, mul);
+}
+
+static void
+frequency_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
 {
 	if (dev_priv->pmu.enable &
 	    config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
...@@ -221,15 +233,17 @@ static void frequency_sample(struct drm_i915_private *dev_priv)
 			intel_runtime_pm_put(dev_priv);
 		}
 
-		update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
-			      1, intel_gpu_freq(dev_priv, val));
+		add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
+				intel_gpu_freq(dev_priv, val),
+				period_ns / 1000);
 	}
 
 	if (dev_priv->pmu.enable &
 	    config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
-		update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ], 1,
-			      intel_gpu_freq(dev_priv,
-					     dev_priv->gt_pm.rps.cur_freq));
+		add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ],
+				intel_gpu_freq(dev_priv,
+					       dev_priv->gt_pm.rps.cur_freq),
+				period_ns / 1000);
 	}
 }
 
...@@ -237,14 +251,27 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
 {
 	struct drm_i915_private *i915 =
 		container_of(hrtimer, struct drm_i915_private, pmu.timer);
+	unsigned int period_ns;
+	ktime_t now;
 
 	if (!READ_ONCE(i915->pmu.timer_enabled))
 		return HRTIMER_NORESTART;
 
-	engines_sample(i915);
-	frequency_sample(i915);
+	now = ktime_get();
+	period_ns = ktime_to_ns(ktime_sub(now, i915->pmu.timer_last));
+	i915->pmu.timer_last = now;
+
+	/*
+	 * Strictly speaking the passed in period may not be 100% accurate for
+	 * all internal calculation, since some amount of time can be spent on
+	 * grabbing the forcewake. However the potential error from timer call-
+	 * back delay greatly dominates this so we keep it simple.
+	 */
+	engines_sample(i915, period_ns);
+	frequency_sample(i915, period_ns);
+
+	hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD));
 
-	hrtimer_forward_now(hrtimer, ns_to_ktime(PERIOD));
 	return HRTIMER_RESTART;
 }
 
...@@ -519,12 +546,12 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
 		case I915_PMU_ACTUAL_FREQUENCY:
 			val =
 			   div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_ACT].cur,
-				   FREQUENCY);
+				   USEC_PER_SEC /* to MHz */);
 			break;
 		case I915_PMU_REQUESTED_FREQUENCY:
 			val =
 			   div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_REQ].cur,
-				   FREQUENCY);
+				   USEC_PER_SEC /* to MHz */);
 			break;
 		case I915_PMU_INTERRUPTS:
 			val = count_interrupts(i915);
...
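
Aside: an illustrative sketch (not driver code) of the accumulation arithmetic above. Each tick adds freq_mhz * period_us to the counter, so dividing the running sum by USEC_PER_SEC yields a time-weighted MHz reading: one second sampled at a steady 300 MHz accumulates 300 * 1000000 and reads back as 300.

#include <stdint.h>

#define USEC_PER_SEC 1000000ULL

static uint64_t freq_sum;	/* plays the role of sample->cur */

static void tick(uint32_t freq_mhz, uint32_t period_ns)
{
	/* matches add_sample_mult(sample, freq, period_ns / 1000) */
	freq_sum += (uint64_t)freq_mhz * (period_ns / 1000);
}

static uint64_t read_mhz(void)
{
	/* matches div_u64(sample->cur, USEC_PER_SEC) in the event read */
	return freq_sum / USEC_PER_SEC;
}
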
...@@ -65,6 +65,14 @@ struct i915_pmu {
 	 * event types.
 	 */
 	u64 enable;
+
+	/**
+	 * @timer_last:
+	 *
+	 * Timestamp of the previous timer invocation.
+	 */
+	ktime_t timer_last;
+
 	/**
 	 * @enable_count: Reference counts for the enabled events.
 	 *
...
...@@ -54,6 +54,7 @@ enum vgt_g2v_type {
  */
 #define VGT_CAPS_FULL_48BIT_PPGTT	BIT(2)
 #define VGT_CAPS_HWSP_EMULATION		BIT(3)
+#define VGT_CAPS_HUGE_GTT		BIT(4)
 
 struct vgt_if {
 	u64 magic;		/* VGT_MAGIC */
...
...@@ -4,6 +4,8 @@
  * Copyright © 2018 Intel Corporation
  */
 
+#include <linux/nospec.h>
+
 #include "i915_drv.h"
 #include "i915_query.h"
 #include <uapi/drm/i915_drm.h>
...@@ -100,7 +102,7 @@ int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 
 	for (i = 0; i < args->num_items; i++, user_item_ptr++) {
 		struct drm_i915_query_item item;
-		u64 func_idx;
+		unsigned long func_idx;
 		int ret;
 
 		if (copy_from_user(&item, user_item_ptr, sizeof(item)))
...@@ -109,12 +111,17 @@ int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 		if (item.query_id == 0)
 			return -EINVAL;
 
+		if (overflows_type(item.query_id - 1, unsigned long))
+			return -EINVAL;
+
 		func_idx = item.query_id - 1;
 
-		if (func_idx < ARRAY_SIZE(i915_query_funcs))
+		ret = -EINVAL;
+		if (func_idx < ARRAY_SIZE(i915_query_funcs)) {
+			func_idx = array_index_nospec(func_idx,
+						      ARRAY_SIZE(i915_query_funcs));
 			ret = i915_query_funcs[func_idx](dev_priv, &item);
-		else
-			ret = -EINVAL;
+		}
 
 		/* Only write the length back to userspace if they differ. */
 		if (ret != item.length && put_user(ret, &user_item_ptr->length))
...
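
Aside: an illustrative sketch (not driver code) of the Spectre-v1 hardening pattern above: bounds-check a user-controlled index, then clamp it before the table load so even a misspeculated access stays inside the array.

#include <stddef.h>

typedef int (*query_fn)(void *priv, void *item);

static int dispatch(query_fn *funcs, size_t nfuncs,
		    unsigned long idx, void *priv, void *item)
{
	if (idx >= nfuncs)
		return -22;	/* -EINVAL */

	/*
	 * The bounds check alone is not enough: the CPU may speculatively
	 * execute the table load with an out-of-range idx. In the kernel,
	 * idx = array_index_nospec(idx, nfuncs) masks idx so that a
	 * misspeculated load cannot leave the table.
	 */
	return funcs[idx](priv, item);
}
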
...@@ -1990,6 +1990,11 @@ enum i915_power_well_id {
 						   _ICL_PORT_COMP_DW10_A, \
 						   _ICL_PORT_COMP_DW10_B)
 
+/* ICL PHY DFLEX registers */
+#define PORT_TX_DFLEXDPMLE1		_MMIO(0x1638C0)
+#define   DFLEXDPMLE1_DPMLETC_MASK(n)	(0xf << (4 * (n)))
+#define   DFLEXDPMLE1_DPMLETC(n, x)	((x) << (4 * (n)))
+
 /* BXT PHY Ref registers */
 #define _PORT_REF_DW3_A			0x16218C
 #define _PORT_REF_DW3_BC		0x6C18C
...@@ -2306,8 +2311,9 @@ enum i915_power_well_id {
 #define   GAMW_ECO_ENABLE_64K_IPS_FIELD	0xF
 
 #define GAMT_CHKN_BIT_REG	_MMIO(0x4ab8)
-#define   GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING	(1<<28)
-#define   GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT	(1<<24)
+#define   GAMT_CHKN_DISABLE_L3_COH_PIPE			(1 << 31)
+#define   GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING	(1 << 28)
+#define   GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT	(1 << 24)
 
 #if 0
 #define PRB0_TAIL	_MMIO(0x2030)
...@@ -2663,6 +2669,9 @@ enum i915_power_well_id {
 #define   GEN8_4x4_STC_OPTIMIZATION_DISABLE	(1<<6)
 #define   GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE	(1<<1)
 
+#define GEN10_CACHE_MODE_SS			_MMIO(0xe420)
+#define   FLOAT_BLEND_OPTIMIZATION_ENABLE	(1 << 4)
+
 #define GEN6_BLITTER_ECOSKPD	_MMIO(0x221d0)
 #define   GEN6_BLITTER_LOCK_SHIFT		16
 #define   GEN6_BLITTER_FBC_NOTIFY		(1<<3)
...@@ -2709,6 +2718,10 @@ enum i915_power_well_id {
 #define   GEN10_F2_SS_DIS_SHIFT		18
 #define   GEN10_F2_SS_DIS_MASK		(0xf << GEN10_F2_SS_DIS_SHIFT)
 
+#define GEN10_MIRROR_FUSE3		_MMIO(0x9118)
+#define   GEN10_L3BANK_PAIR_COUNT	4
+#define   GEN10_L3BANK_MASK		0x0F
+
 #define GEN8_EU_DISABLE0		_MMIO(0x9134)
 #define   GEN8_EU_DIS0_S0_MASK		0xffffff
 #define   GEN8_EU_DIS0_S1_SHIFT		24
...@@ -4088,10 +4101,10 @@ enum {
 #define   EDP_Y_COORDINATE_ENABLE	(1<<25) /* GLK and CNL+ */
 #define   EDP_MAX_SU_DISABLE_TIME(t)	((t)<<20)
 #define   EDP_MAX_SU_DISABLE_TIME_MASK	(0x1f<<20)
-#define   EDP_PSR2_TP2_TIME_500		(0<<8)
-#define   EDP_PSR2_TP2_TIME_100		(1<<8)
-#define   EDP_PSR2_TP2_TIME_2500	(2<<8)
-#define   EDP_PSR2_TP2_TIME_50		(3<<8)
+#define   EDP_PSR2_TP2_TIME_500us	(0<<8)
+#define   EDP_PSR2_TP2_TIME_100us	(1<<8)
+#define   EDP_PSR2_TP2_TIME_2500us	(2<<8)
+#define   EDP_PSR2_TP2_TIME_50us	(3<<8)
 #define   EDP_PSR2_TP2_TIME_MASK	(3<<8)
 #define   EDP_PSR2_FRAME_BEFORE_SU_SHIFT 4
 #define   EDP_PSR2_FRAME_BEFORE_SU_MASK	(0xf<<4)
...@@ -4133,11 +4146,12 @@ enum {
 #define   ADPA_DAC_ENABLE	(1<<31)
 #define   ADPA_DAC_DISABLE	0
-#define   ADPA_PIPE_SELECT_MASK	(1<<30)
-#define   ADPA_PIPE_A_SELECT	0
-#define   ADPA_PIPE_B_SELECT	(1<<30)
-#define   ADPA_PIPE_SELECT(pipe) ((pipe) << 30)
-/* CPT uses bits 29:30 for pch transcoder select */
+#define   ADPA_PIPE_SEL_SHIFT		30
+#define   ADPA_PIPE_SEL_MASK		(1<<30)
+#define   ADPA_PIPE_SEL(pipe)		((pipe) << 30)
+#define   ADPA_PIPE_SEL_SHIFT_CPT	29
+#define   ADPA_PIPE_SEL_MASK_CPT	(3<<29)
+#define   ADPA_PIPE_SEL_CPT(pipe)	((pipe) << 29)
 #define   ADPA_CRT_HOTPLUG_MASK	0x03ff0000 /* bit 25-16 */
 #define   ADPA_CRT_HOTPLUG_MONITOR_NONE	(0<<24)
 #define   ADPA_CRT_HOTPLUG_MONITOR_MASK	(3<<24)
...@@ -4296,9 +4310,9 @@ enum {
 /* Gen 3 SDVO bits: */
 #define   SDVO_ENABLE				(1 << 31)
-#define   SDVO_PIPE_SEL(pipe)			((pipe) << 30)
+#define   SDVO_PIPE_SEL_SHIFT			30
 #define   SDVO_PIPE_SEL_MASK			(1 << 30)
-#define   SDVO_PIPE_B_SELECT			(1 << 30)
+#define   SDVO_PIPE_SEL(pipe)			((pipe) << 30)
 #define   SDVO_STALL_SELECT			(1 << 29)
 #define   SDVO_INTERRUPT_ENABLE			(1 << 26)
 /*
...@@ -4338,12 +4352,14 @@ enum {
 #define   SDVOB_HOTPLUG_ENABLE			(1 << 23) /* SDVO only */
 
 /* Gen 6 (CPT) SDVO/HDMI bits: */
-#define   SDVO_PIPE_SEL_CPT(pipe)		((pipe) << 29)
+#define   SDVO_PIPE_SEL_SHIFT_CPT		29
 #define   SDVO_PIPE_SEL_MASK_CPT		(3 << 29)
+#define   SDVO_PIPE_SEL_CPT(pipe)		((pipe) << 29)
 
 /* CHV SDVO/HDMI bits: */
-#define   SDVO_PIPE_SEL_CHV(pipe)		((pipe) << 24)
+#define   SDVO_PIPE_SEL_SHIFT_CHV		24
 #define   SDVO_PIPE_SEL_MASK_CHV		(3 << 24)
+#define   SDVO_PIPE_SEL_CHV(pipe)		((pipe) << 24)
 
 /* DVO port control */
...@@ -4354,7 +4370,9 @@ enum {
 #define _DVOC			0x61160
 #define DVOC			_MMIO(_DVOC)
 #define   DVO_ENABLE			(1 << 31)
-#define   DVO_PIPE_B_SELECT		(1 << 30)
+#define   DVO_PIPE_SEL_SHIFT		30
+#define   DVO_PIPE_SEL_MASK		(1 << 30)
+#define   DVO_PIPE_SEL(pipe)		((pipe) << 30)
 #define   DVO_PIPE_STALL_UNUSED		(0 << 28)
 #define   DVO_PIPE_STALL		(1 << 28)
 #define   DVO_PIPE_STALL_TV		(2 << 28)
...@@ -4391,9 +4409,12 @@ enum {
  */
 #define   LVDS_PORT_EN			(1 << 31)
 /* Selects pipe B for LVDS data.  Must be set on pre-965. */
-#define   LVDS_PIPEB_SELECT		(1 << 30)
-#define   LVDS_PIPE_MASK		(1 << 30)
-#define   LVDS_PIPE(pipe)		((pipe) << 30)
+#define   LVDS_PIPE_SEL_SHIFT		30
+#define   LVDS_PIPE_SEL_MASK		(1 << 30)
+#define   LVDS_PIPE_SEL(pipe)		((pipe) << 30)
+#define   LVDS_PIPE_SEL_SHIFT_CPT	29
+#define   LVDS_PIPE_SEL_MASK_CPT	(3 << 29)
+#define   LVDS_PIPE_SEL_CPT(pipe)	((pipe) << 29)
 /* LVDS dithering flag on 965/g4x platform */
 #define   LVDS_ENABLE_DITHER		(1 << 25)
 /* LVDS sync polarity flags. Set to invert (i.e. negative) */
...@@ -4690,7 +4711,9 @@ enum {
 /* Enables the TV encoder */
 # define TV_ENC_ENABLE			(1 << 31)
 /* Sources the TV encoder input from pipe B instead of A. */
-# define TV_ENC_PIPEB_SELECT		(1 << 30)
+# define TV_ENC_PIPE_SEL_SHIFT		30
+# define TV_ENC_PIPE_SEL_MASK		(1 << 30)
+# define TV_ENC_PIPE_SEL(pipe)		((pipe) << 30)
 /* Outputs composite video (DAC A only) */
 # define TV_ENC_OUTPUT_COMPOSITE	(0 << 28)
 /* Outputs SVideo video (DAC B/C) */
...@@ -5172,10 +5195,15 @@ enum {
 #define CHV_DP_D		_MMIO(VLV_DISPLAY_BASE + 0x64300)
 
 #define   DP_PORT_EN			(1 << 31)
-#define   DP_PIPEB_SELECT		(1 << 30)
-#define   DP_PIPE_MASK			(1 << 30)
-#define   DP_PIPE_SELECT_CHV(pipe)	((pipe) << 16)
-#define   DP_PIPE_MASK_CHV		(3 << 16)
+#define   DP_PIPE_SEL_SHIFT		30
+#define   DP_PIPE_SEL_MASK		(1 << 30)
+#define   DP_PIPE_SEL(pipe)		((pipe) << 30)
+#define   DP_PIPE_SEL_SHIFT_IVB		29
+#define   DP_PIPE_SEL_MASK_IVB		(3 << 29)
+#define   DP_PIPE_SEL_IVB(pipe)		((pipe) << 29)
+#define   DP_PIPE_SEL_SHIFT_CHV		16
+#define   DP_PIPE_SEL_MASK_CHV		(3 << 16)
+#define   DP_PIPE_SEL_CHV(pipe)		((pipe) << 16)
 
 /* Link training mode - select a suitable mode for each stage */
 #define   DP_LINK_TRAIN_PAT_1		(0 << 28)
...@@ -5896,7 +5924,6 @@ enum {
 #define   CURSOR_GAMMA_ENABLE	0x40000000
 #define   CURSOR_STRIDE_SHIFT	28
 #define   CURSOR_STRIDE(x)	((ffs(x)-9) << CURSOR_STRIDE_SHIFT) /* 256,512,1k,2k */
-#define   CURSOR_PIPE_CSC_ENABLE (1<<24)
 #define   CURSOR_FORMAT_SHIFT	24
 #define   CURSOR_FORMAT_MASK	(0x07 << CURSOR_FORMAT_SHIFT)
 #define   CURSOR_FORMAT_2C	(0x00 << CURSOR_FORMAT_SHIFT)
...@@ -5905,18 +5932,21 @@ enum {
 #define   CURSOR_FORMAT_ARGB	(0x04 << CURSOR_FORMAT_SHIFT)
 #define   CURSOR_FORMAT_XRGB	(0x05 << CURSOR_FORMAT_SHIFT)
 /* New style CUR*CNTR flags */
-#define   CURSOR_MODE		0x27
-#define   CURSOR_MODE_DISABLE	0x00
-#define   CURSOR_MODE_128_32B_AX 0x02
-#define   CURSOR_MODE_256_32B_AX 0x03
-#define   CURSOR_MODE_64_32B_AX 0x07
-#define   CURSOR_MODE_128_ARGB_AX ((1 << 5) | CURSOR_MODE_128_32B_AX)
-#define   CURSOR_MODE_256_ARGB_AX ((1 << 5) | CURSOR_MODE_256_32B_AX)
-#define   CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
+#define   MCURSOR_MODE		0x27
+#define   MCURSOR_MODE_DISABLE	0x00
+#define   MCURSOR_MODE_128_32B_AX 0x02
+#define   MCURSOR_MODE_256_32B_AX 0x03
+#define   MCURSOR_MODE_64_32B_AX 0x07
+#define   MCURSOR_MODE_128_ARGB_AX ((1 << 5) | MCURSOR_MODE_128_32B_AX)
+#define   MCURSOR_MODE_256_ARGB_AX ((1 << 5) | MCURSOR_MODE_256_32B_AX)
+#define   MCURSOR_MODE_64_ARGB_AX ((1 << 5) | MCURSOR_MODE_64_32B_AX)
+#define   MCURSOR_PIPE_SELECT_MASK	(0x3 << 28)
+#define   MCURSOR_PIPE_SELECT_SHIFT	28
 #define   MCURSOR_PIPE_SELECT(pipe)	((pipe) << 28)
 #define   MCURSOR_GAMMA_ENABLE  (1 << 26)
-#define   CURSOR_ROTATE_180	(1<<15)
-#define   CURSOR_TRICKLE_FEED_DISABLE	(1 << 14)
+#define   MCURSOR_PIPE_CSC_ENABLE (1<<24)
+#define   MCURSOR_ROTATE_180	(1<<15)
+#define   MCURSOR_TRICKLE_FEED_DISABLE	(1 << 14)
 #define _CURABASE		0x70084
 #define _CURAPOS		0x70088
 #define   CURSOR_POS_MASK       0x007FF
...@@ -6764,6 +6794,10 @@ enum {
 #define _PS_VPHASE_1B       0x68988
 #define _PS_VPHASE_2B       0x68A88
 #define _PS_VPHASE_1C       0x69188
+#define   PS_Y_PHASE(x)		((x) << 16)
+#define   PS_UV_RGB_PHASE(x)	((x) << 0)
+#define   PS_PHASE_MASK		(0x7fff << 1) /* u2.13 */
+#define   PS_PHASE_TRIP		(1 << 0)
 
 #define _PS_HPHASE_1A       0x68194
 #define _PS_HPHASE_2A       0x68294
...@@ -7192,13 +7226,17 @@ enum {
 /* GEN7 chicken */
 #define GEN7_COMMON_SLICE_CHICKEN1		_MMIO(0x7010)
-# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC	((1<<10) | (1<<26))
-# define GEN9_RHWO_OPTIMIZATION_DISABLE		(1<<14)
-#define COMMON_SLICE_CHICKEN2			_MMIO(0x7014)
-# define GEN9_PBE_COMPRESSED_HASH_SELECTION	(1<<13)
-# define GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE (1<<12)
-# define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1<<8)
-# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE	(1<<0)
+#define   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC	((1 << 10) | (1 << 26))
+#define   GEN9_RHWO_OPTIMIZATION_DISABLE	(1 << 14)
+
+#define COMMON_SLICE_CHICKEN2				_MMIO(0x7014)
+#define   GEN9_PBE_COMPRESSED_HASH_SELECTION		(1 << 13)
+#define   GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE	(1 << 12)
+#define   GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION	(1 << 8)
+#define   GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE		(1 << 0)
+
+#define GEN11_COMMON_SLICE_CHICKEN3		_MMIO(0x7304)
+#define   GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC	(1 << 11)
 
 #define HIZ_CHICKEN					_MMIO(0x7018)
 # define CHV_HZ_8X8_MODE_IN_1X			(1<<15)
...@@ -7208,6 +7246,7 @@ enum {
 #define   DISABLE_PIXEL_MASK_CAMMING		(1<<14)
 
 #define GEN9_SLICE_COMMON_ECO_CHICKEN1		_MMIO(0x731c)
+#define   GEN11_STATE_CACHE_REDIRECT_TO_CS	(1 << 11)
 
 #define GEN7_L3SQCREG1				_MMIO(0xB010)
 #define  VLV_B0_WA_L3SQCREG1_VALUE		0x00D30000
...@@ -7862,27 +7901,14 @@ enum {
 #define PCH_DP_AUX_CH_DATA(aux_ch, i)	_MMIO(_PORT((aux_ch) - AUX_CH_B, _PCH_DPB_AUX_CH_DATA1, _PCH_DPC_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
 
 /* CPT */
-#define  PORT_TRANS_A_SEL_CPT	0
-#define  PORT_TRANS_B_SEL_CPT	(1<<29)
-#define  PORT_TRANS_C_SEL_CPT	(2<<29)
-#define  PORT_TRANS_SEL_MASK	(3<<29)
-#define  PORT_TRANS_SEL_CPT(pipe)	((pipe) << 29)
-#define  PORT_TO_PIPE(val)	(((val) & (1<<30)) >> 30)
-#define  PORT_TO_PIPE_CPT(val)	(((val) & PORT_TRANS_SEL_MASK) >> 29)
-#define  SDVO_PORT_TO_PIPE_CHV(val)	(((val) & (3<<24)) >> 24)
-#define  DP_PORT_TO_PIPE_CHV(val)	(((val) & (3<<16)) >> 16)
-
 #define _TRANS_DP_CTL_A		0xe0300
 #define _TRANS_DP_CTL_B		0xe1300
 #define _TRANS_DP_CTL_C		0xe2300
 #define TRANS_DP_CTL(pipe)	_MMIO_PIPE(pipe, _TRANS_DP_CTL_A, _TRANS_DP_CTL_B)
 #define  TRANS_DP_OUTPUT_ENABLE	(1<<31)
-#define  TRANS_DP_PORT_SEL_B	(0<<29)
-#define  TRANS_DP_PORT_SEL_C	(1<<29)
-#define  TRANS_DP_PORT_SEL_D	(2<<29)
-#define  TRANS_DP_PORT_SEL_NONE	(3<<29)
-#define  TRANS_DP_PORT_SEL_MASK	(3<<29)
-#define  TRANS_DP_PIPE_TO_PORT(val)	((((val) & TRANS_DP_PORT_SEL_MASK) >> 29) + PORT_B)
+#define  TRANS_DP_PORT_SEL_MASK		(3 << 29)
+#define  TRANS_DP_PORT_SEL_NONE		(3 << 29)
+#define  TRANS_DP_PORT_SEL(port)	(((port) - PORT_B) << 29)
 #define  TRANS_DP_AUDIO_ONLY	(1<<26)
 #define  TRANS_DP_ENH_FRAMING	(1<<18)
 #define  TRANS_DP_8BPC		(0<<9)
...@@ -8322,8 +8348,9 @@ enum {
 #define GEN7_ROW_CHICKEN2		_MMIO(0xe4f4)
 #define GEN7_ROW_CHICKEN2_GT2		_MMIO(0xf4f4)
-#define   DOP_CLOCK_GATING_DISABLE	(1<<0)
-#define   PUSH_CONSTANT_DEREF_DISABLE	(1<<8)
+#define   DOP_CLOCK_GATING_DISABLE	(1 << 0)
+#define   PUSH_CONSTANT_DEREF_DISABLE	(1 << 8)
+#define   GEN11_TDL_CLOCK_GATING_FIX_DISABLE	(1 << 1)
 
 #define HSW_ROW_CHICKEN3		_MMIO(0xe49c)
 #define  HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE    (1 << 6)
...@@ -9100,13 +9127,16 @@ enum skl_power_gate {
 #define  DPLL_CFGCR1_QDIV_RATIO_MASK	(0xff << 10)
 #define  DPLL_CFGCR1_QDIV_RATIO_SHIFT	(10)
 #define  DPLL_CFGCR1_QDIV_RATIO(x)	((x) << 10)
+#define  DPLL_CFGCR1_QDIV_MODE_SHIFT	(9)
 #define  DPLL_CFGCR1_QDIV_MODE(x)	((x) << 9)
 #define  DPLL_CFGCR1_KDIV_MASK		(7 << 6)
+#define  DPLL_CFGCR1_KDIV_SHIFT		(6)
 #define  DPLL_CFGCR1_KDIV(x)		((x) << 6)
 #define  DPLL_CFGCR1_KDIV_1		(1 << 6)
 #define  DPLL_CFGCR1_KDIV_2		(2 << 6)
 #define  DPLL_CFGCR1_KDIV_4		(4 << 6)
 #define  DPLL_CFGCR1_PDIV_MASK		(0xf << 2)
+#define  DPLL_CFGCR1_PDIV_SHIFT		(2)
 #define  DPLL_CFGCR1_PDIV(x)		((x) << 2)
 #define  DPLL_CFGCR1_PDIV_2		(1 << 2)
 #define  DPLL_CFGCR1_PDIV_3		(2 << 2)
...
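
Aside: an illustrative sketch (standalone, not driver code) of the mask/shift/SEL() macro triplet this header now standardizes on for every pipe-select field: clear the field with the MASK, then OR in the new selection with SEL(pipe).

#include <stdint.h>

#define LVDS_PIPE_SEL_MASK	(1u << 30)
#define LVDS_PIPE_SEL(pipe)	((uint32_t)(pipe) << 30)

static uint32_t lvds_repoint(uint32_t val, int pipe)
{
	val &= ~LVDS_PIPE_SEL_MASK;	/* drop the old pipe selection */
	val |= LVDS_PIPE_SEL(pipe);	/* select the new pipe */
	return val;
}
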
...@@ -320,6 +320,7 @@ static void advance_ring(struct i915_request *request)
 		 * is just about to be. Either works, if we miss the last two
 		 * noops - they are safe to be replayed on a reset.
 		 */
+		GEM_TRACE("marking %s as inactive\n", ring->timeline->name);
 		tail = READ_ONCE(request->tail);
 		list_del(&ring->active_link);
 	} else {
...@@ -383,8 +384,8 @@ static void __retire_engine_request(struct intel_engine_cs *engine,
 	 * the subsequent request.
 	 */
 	if (engine->last_retired_context)
-		intel_context_unpin(engine->last_retired_context, engine);
-	engine->last_retired_context = rq->ctx;
+		intel_context_unpin(engine->last_retired_context);
+	engine->last_retired_context = rq->hw_context;
 }
 
 static void __retire_engine_upto(struct intel_engine_cs *engine,
...@@ -455,8 +456,8 @@ static void i915_request_retire(struct i915_request *request)
 	i915_request_remove_from_client(request);
 
 	/* Retirement decays the ban score as it is a sign of ctx progress */
-	atomic_dec_if_positive(&request->ctx->ban_score);
-	intel_context_unpin(request->ctx, request->engine);
+	atomic_dec_if_positive(&request->gem_context->ban_score);
+	intel_context_unpin(request->hw_context);
 
 	__retire_engine_upto(request->engine, request);
...@@ -657,7 +658,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 {
 	struct drm_i915_private *i915 = engine->i915;
 	struct i915_request *rq;
-	struct intel_ring *ring;
+	struct intel_context *ce;
 	int ret;
 
 	lockdep_assert_held(&i915->drm.struct_mutex);
...@@ -681,22 +682,21 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	 * GGTT space, so do this first before we reserve a seqno for
 	 * ourselves.
 	 */
-	ring = intel_context_pin(ctx, engine);
-	if (IS_ERR(ring))
-		return ERR_CAST(ring);
-	GEM_BUG_ON(!ring);
+	ce = intel_context_pin(ctx, engine);
+	if (IS_ERR(ce))
+		return ERR_CAST(ce);
 
 	ret = reserve_gt(i915);
 	if (ret)
 		goto err_unpin;
 
-	ret = intel_ring_wait_for_space(ring, MIN_SPACE_FOR_ADD_REQUEST);
+	ret = intel_ring_wait_for_space(ce->ring, MIN_SPACE_FOR_ADD_REQUEST);
 	if (ret)
 		goto err_unreserve;
 
 	/* Move our oldest request to the slab-cache (if not in use!) */
-	rq = list_first_entry(&ring->request_list, typeof(*rq), ring_link);
-	if (!list_is_last(&rq->ring_link, &ring->request_list) &&
+	rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
+	if (!list_is_last(&rq->ring_link, &ce->ring->request_list) &&
 	    i915_request_completed(rq))
 		i915_request_retire(rq);
...@@ -760,9 +760,10 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	INIT_LIST_HEAD(&rq->active_list);
 	rq->i915 = i915;
 	rq->engine = engine;
-	rq->ctx = ctx;
-	rq->ring = ring;
-	rq->timeline = ring->timeline;
+	rq->gem_context = ctx;
+	rq->hw_context = ce;
+	rq->ring = ce->ring;
+	rq->timeline = ce->ring->timeline;
 	GEM_BUG_ON(rq->timeline == &engine->timeline);
 
 	spin_lock_init(&rq->lock);
...@@ -814,14 +815,14 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 		goto err_unwind;
 
 	/* Keep a second pin for the dual retirement along engine and ring */
-	__intel_context_pin(rq->ctx, engine);
+	__intel_context_pin(ce);
 
 	/* Check that we didn't interrupt ourselves with a new request */
 	GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno);
 	return rq;
 
 err_unwind:
-	rq->ring->emit = rq->head;
+	ce->ring->emit = rq->head;
 
 	/* Make sure we didn't add ourselves to external state before freeing */
 	GEM_BUG_ON(!list_empty(&rq->active_list));
...@@ -832,7 +833,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 err_unreserve:
 	unreserve_gt(i915);
 err_unpin:
-	intel_context_unpin(ctx, engine);
+	intel_context_unpin(ce);
 	return ERR_PTR(ret);
 }
 
...@@ -1018,8 +1019,8 @@ i915_request_await_object(struct i915_request *to,
 void __i915_request_add(struct i915_request *request, bool flush_caches)
 {
 	struct intel_engine_cs *engine = request->engine;
-	struct intel_ring *ring = request->ring;
 	struct i915_timeline *timeline = request->timeline;
+	struct intel_ring *ring = request->ring;
 	struct i915_request *prev;
 	u32 *cs;
 	int err;
...@@ -1095,8 +1096,10 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
 	i915_gem_active_set(&timeline->last_request, request);
 
 	list_add_tail(&request->ring_link, &ring->request_list);
-	if (list_is_first(&request->ring_link, &ring->request_list))
+	if (list_is_first(&request->ring_link, &ring->request_list)) {
+		GEM_TRACE("marking %s as active\n", ring->timeline->name);
 		list_add(&ring->active_link, &request->i915->gt.active_rings);
+	}
 	request->emitted_jiffies = jiffies;
 
 	/*
...@@ -1113,7 +1116,7 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
 	local_bh_disable();
 	rcu_read_lock(); /* RCU serialisation for set-wedged protection */
 	if (engine->schedule)
-		engine->schedule(request, &request->ctx->sched);
+		engine->schedule(request, &request->gem_context->sched);
 	rcu_read_unlock();
 	i915_sw_fence_commit(&request->submit);
 	local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
...
...@@ -93,8 +93,9 @@ struct i915_request { ...@@ -93,8 +93,9 @@ struct i915_request {
* i915_request_free() will then decrement the refcount on the * i915_request_free() will then decrement the refcount on the
* context. * context.
*/ */
struct i915_gem_context *ctx; struct i915_gem_context *gem_context;
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
struct intel_context *hw_context;
struct intel_ring *ring; struct intel_ring *ring;
struct i915_timeline *timeline; struct i915_timeline *timeline;
struct intel_signal_node signaling; struct intel_signal_node signaling;
...@@ -266,6 +267,7 @@ long i915_request_wait(struct i915_request *rq, ...@@ -266,6 +267,7 @@ long i915_request_wait(struct i915_request *rq,
#define I915_WAIT_INTERRUPTIBLE BIT(0) #define I915_WAIT_INTERRUPTIBLE BIT(0)
#define I915_WAIT_LOCKED BIT(1) /* struct_mutex held, handle GPU reset */ #define I915_WAIT_LOCKED BIT(1) /* struct_mutex held, handle GPU reset */
#define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */ #define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */
#define I915_WAIT_FOR_IDLE_BOOST BIT(3)
static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine); static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
......
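A hedged sketch of a caller using the new I915_WAIT_FOR_IDLE_BOOST flag; the i915_gem_wait_for_idle(i915, flags) signature is an assumption from surrounding kernel code, not shown in this diff:

	int err;

	/* Wait for the GPU to idle, boosting clocks so the wait is short. */
	err = i915_gem_wait_for_idle(i915,
				     I915_WAIT_INTERRUPTIBLE |
				     I915_WAIT_LOCKED |
				     I915_WAIT_FOR_IDLE_BOOST);
	if (err)
		return err;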
...@@ -591,21 +591,26 @@ TRACE_EVENT(i915_gem_ring_sync_to, ...@@ -591,21 +591,26 @@ TRACE_EVENT(i915_gem_ring_sync_to,
TP_STRUCT__entry( TP_STRUCT__entry(
__field(u32, dev) __field(u32, dev)
__field(u32, sync_from) __field(u32, from_class)
__field(u32, sync_to) __field(u32, from_instance)
__field(u32, to_class)
__field(u32, to_instance)
__field(u32, seqno) __field(u32, seqno)
), ),
TP_fast_assign( TP_fast_assign(
__entry->dev = from->i915->drm.primary->index; __entry->dev = from->i915->drm.primary->index;
__entry->sync_from = from->engine->id; __entry->from_class = from->engine->uabi_class;
__entry->sync_to = to->engine->id; __entry->from_instance = from->engine->instance;
__entry->to_class = to->engine->uabi_class;
__entry->to_instance = to->engine->instance;
__entry->seqno = from->global_seqno; __entry->seqno = from->global_seqno;
), ),
TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u", TP_printk("dev=%u, sync-from=%u:%u, sync-to=%u:%u, seqno=%u",
__entry->dev, __entry->dev,
__entry->sync_from, __entry->sync_to, __entry->from_class, __entry->from_instance,
__entry->to_class, __entry->to_instance,
__entry->seqno) __entry->seqno)
); );
...@@ -616,24 +621,27 @@ TRACE_EVENT(i915_request_queue, ...@@ -616,24 +621,27 @@ TRACE_EVENT(i915_request_queue,
TP_STRUCT__entry( TP_STRUCT__entry(
__field(u32, dev) __field(u32, dev)
__field(u32, hw_id) __field(u32, hw_id)
__field(u32, ring) __field(u64, ctx)
__field(u32, ctx) __field(u16, class)
__field(u16, instance)
__field(u32, seqno) __field(u32, seqno)
__field(u32, flags) __field(u32, flags)
), ),
TP_fast_assign( TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index; __entry->dev = rq->i915->drm.primary->index;
__entry->hw_id = rq->ctx->hw_id; __entry->hw_id = rq->gem_context->hw_id;
__entry->ring = rq->engine->id; __entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context; __entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno; __entry->seqno = rq->fence.seqno;
__entry->flags = flags; __entry->flags = flags;
), ),
TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, flags=0x%x", TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, flags=0x%x",
__entry->dev, __entry->hw_id, __entry->ring, __entry->ctx, __entry->dev, __entry->class, __entry->instance,
__entry->seqno, __entry->flags) __entry->hw_id, __entry->ctx, __entry->seqno,
__entry->flags)
); );
DECLARE_EVENT_CLASS(i915_request, DECLARE_EVENT_CLASS(i915_request,
...@@ -643,24 +651,27 @@ DECLARE_EVENT_CLASS(i915_request, ...@@ -643,24 +651,27 @@ DECLARE_EVENT_CLASS(i915_request,
TP_STRUCT__entry( TP_STRUCT__entry(
__field(u32, dev) __field(u32, dev)
__field(u32, hw_id) __field(u32, hw_id)
__field(u32, ring) __field(u64, ctx)
__field(u32, ctx) __field(u16, class)
__field(u16, instance)
__field(u32, seqno) __field(u32, seqno)
__field(u32, global) __field(u32, global)
), ),
TP_fast_assign( TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index; __entry->dev = rq->i915->drm.primary->index;
__entry->hw_id = rq->ctx->hw_id; __entry->hw_id = rq->gem_context->hw_id;
__entry->ring = rq->engine->id; __entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context; __entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno; __entry->seqno = rq->fence.seqno;
__entry->global = rq->global_seqno; __entry->global = rq->global_seqno;
), ),
TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u", TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u",
__entry->dev, __entry->hw_id, __entry->ring, __entry->ctx, __entry->dev, __entry->class, __entry->instance,
__entry->seqno, __entry->global) __entry->hw_id, __entry->ctx, __entry->seqno,
__entry->global)
); );
DEFINE_EVENT(i915_request, i915_request_add, DEFINE_EVENT(i915_request, i915_request_add,
...@@ -686,8 +697,9 @@ TRACE_EVENT(i915_request_in, ...@@ -686,8 +697,9 @@ TRACE_EVENT(i915_request_in,
TP_STRUCT__entry( TP_STRUCT__entry(
__field(u32, dev) __field(u32, dev)
__field(u32, hw_id) __field(u32, hw_id)
__field(u32, ring) __field(u64, ctx)
__field(u32, ctx) __field(u16, class)
__field(u16, instance)
__field(u32, seqno) __field(u32, seqno)
__field(u32, global_seqno) __field(u32, global_seqno)
__field(u32, port) __field(u32, port)
...@@ -696,8 +708,9 @@ TRACE_EVENT(i915_request_in, ...@@ -696,8 +708,9 @@ TRACE_EVENT(i915_request_in,
TP_fast_assign( TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index; __entry->dev = rq->i915->drm.primary->index;
__entry->hw_id = rq->ctx->hw_id; __entry->hw_id = rq->gem_context->hw_id;
__entry->ring = rq->engine->id; __entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context; __entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno; __entry->seqno = rq->fence.seqno;
__entry->global_seqno = rq->global_seqno; __entry->global_seqno = rq->global_seqno;
...@@ -705,10 +718,10 @@ TRACE_EVENT(i915_request_in, ...@@ -705,10 +718,10 @@ TRACE_EVENT(i915_request_in,
__entry->port = port; __entry->port = port;
), ),
TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, prio=%u, global=%u, port=%u", TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, prio=%u, global=%u, port=%u",
__entry->dev, __entry->hw_id, __entry->ring, __entry->ctx, __entry->dev, __entry->class, __entry->instance,
__entry->seqno, __entry->prio, __entry->global_seqno, __entry->hw_id, __entry->ctx, __entry->seqno,
__entry->port) __entry->prio, __entry->global_seqno, __entry->port)
); );
TRACE_EVENT(i915_request_out, TRACE_EVENT(i915_request_out,
...@@ -718,8 +731,9 @@ TRACE_EVENT(i915_request_out, ...@@ -718,8 +731,9 @@ TRACE_EVENT(i915_request_out,
TP_STRUCT__entry( TP_STRUCT__entry(
__field(u32, dev) __field(u32, dev)
__field(u32, hw_id) __field(u32, hw_id)
__field(u32, ring) __field(u64, ctx)
__field(u32, ctx) __field(u16, class)
__field(u16, instance)
__field(u32, seqno) __field(u32, seqno)
__field(u32, global_seqno) __field(u32, global_seqno)
__field(u32, completed) __field(u32, completed)
...@@ -727,17 +741,18 @@ TRACE_EVENT(i915_request_out, ...@@ -727,17 +741,18 @@ TRACE_EVENT(i915_request_out,
TP_fast_assign( TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index; __entry->dev = rq->i915->drm.primary->index;
__entry->hw_id = rq->ctx->hw_id; __entry->hw_id = rq->gem_context->hw_id;
__entry->ring = rq->engine->id; __entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context; __entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno; __entry->seqno = rq->fence.seqno;
__entry->global_seqno = rq->global_seqno; __entry->global_seqno = rq->global_seqno;
__entry->completed = i915_request_completed(rq); __entry->completed = i915_request_completed(rq);
), ),
TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, completed?=%u", TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u, completed?=%u",
__entry->dev, __entry->hw_id, __entry->ring, __entry->dev, __entry->class, __entry->instance,
__entry->ctx, __entry->seqno, __entry->hw_id, __entry->ctx, __entry->seqno,
__entry->global_seqno, __entry->completed) __entry->global_seqno, __entry->completed)
); );
...@@ -771,21 +786,23 @@ TRACE_EVENT(intel_engine_notify, ...@@ -771,21 +786,23 @@ TRACE_EVENT(intel_engine_notify,
TP_STRUCT__entry( TP_STRUCT__entry(
__field(u32, dev) __field(u32, dev)
__field(u32, ring) __field(u16, class)
__field(u16, instance)
__field(u32, seqno) __field(u32, seqno)
__field(bool, waiters) __field(bool, waiters)
), ),
TP_fast_assign( TP_fast_assign(
__entry->dev = engine->i915->drm.primary->index; __entry->dev = engine->i915->drm.primary->index;
__entry->ring = engine->id; __entry->class = engine->uabi_class;
__entry->instance = engine->instance;
__entry->seqno = intel_engine_get_seqno(engine); __entry->seqno = intel_engine_get_seqno(engine);
__entry->waiters = waiters; __entry->waiters = waiters;
), ),
TP_printk("dev=%u, ring=%u, seqno=%u, waiters=%u", TP_printk("dev=%u, engine=%u:%u, seqno=%u, waiters=%u",
__entry->dev, __entry->ring, __entry->seqno, __entry->dev, __entry->class, __entry->instance,
__entry->waiters) __entry->seqno, __entry->waiters)
); );
DEFINE_EVENT(i915_request, i915_request_retire, DEFINE_EVENT(i915_request, i915_request_retire,
...@@ -800,8 +817,9 @@ TRACE_EVENT(i915_request_wait_begin, ...@@ -800,8 +817,9 @@ TRACE_EVENT(i915_request_wait_begin,
TP_STRUCT__entry( TP_STRUCT__entry(
__field(u32, dev) __field(u32, dev)
__field(u32, hw_id) __field(u32, hw_id)
__field(u32, ring) __field(u64, ctx)
__field(u32, ctx) __field(u16, class)
__field(u16, instance)
__field(u32, seqno) __field(u32, seqno)
__field(u32, global) __field(u32, global)
__field(unsigned int, flags) __field(unsigned int, flags)
...@@ -815,18 +833,20 @@ TRACE_EVENT(i915_request_wait_begin, ...@@ -815,18 +833,20 @@ TRACE_EVENT(i915_request_wait_begin,
*/ */
TP_fast_assign( TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index; __entry->dev = rq->i915->drm.primary->index;
__entry->hw_id = rq->ctx->hw_id; __entry->hw_id = rq->gem_context->hw_id;
__entry->ring = rq->engine->id; __entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context; __entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno; __entry->seqno = rq->fence.seqno;
__entry->global = rq->global_seqno; __entry->global = rq->global_seqno;
__entry->flags = flags; __entry->flags = flags;
), ),
TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, blocking=%u, flags=0x%x", TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u, blocking=%u, flags=0x%x",
__entry->dev, __entry->hw_id, __entry->ring, __entry->ctx, __entry->dev, __entry->class, __entry->instance,
__entry->seqno, __entry->global, __entry->hw_id, __entry->ctx, __entry->seqno,
!!(__entry->flags & I915_WAIT_LOCKED), __entry->flags) __entry->global, !!(__entry->flags & I915_WAIT_LOCKED),
__entry->flags)
); );
DEFINE_EVENT(i915_request, i915_request_wait_end, DEFINE_EVENT(i915_request, i915_request_wait_end,
...@@ -936,7 +956,7 @@ DECLARE_EVENT_CLASS(i915_context, ...@@ -936,7 +956,7 @@ DECLARE_EVENT_CLASS(i915_context,
__entry->dev = ctx->i915->drm.primary->index; __entry->dev = ctx->i915->drm.primary->index;
__entry->ctx = ctx; __entry->ctx = ctx;
__entry->hw_id = ctx->hw_id; __entry->hw_id = ctx->hw_id;
__entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL; __entry->vm = ctx->ppgtt ? &ctx->ppgtt->vm : NULL;
), ),
TP_printk("dev=%u, ctx=%p, ctx_vm=%p, hw_id=%u", TP_printk("dev=%u, ctx=%p, ctx_vm=%p, hw_id=%u",
...@@ -966,21 +986,24 @@ TRACE_EVENT(switch_mm, ...@@ -966,21 +986,24 @@ TRACE_EVENT(switch_mm,
TP_ARGS(engine, to), TP_ARGS(engine, to),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(u32, ring) __field(u16, class)
__field(u16, instance)
__field(struct i915_gem_context *, to) __field(struct i915_gem_context *, to)
__field(struct i915_address_space *, vm) __field(struct i915_address_space *, vm)
__field(u32, dev) __field(u32, dev)
), ),
TP_fast_assign( TP_fast_assign(
__entry->ring = engine->id; __entry->class = engine->uabi_class;
__entry->instance = engine->instance;
__entry->to = to; __entry->to = to;
__entry->vm = to->ppgtt? &to->ppgtt->base : NULL; __entry->vm = to->ppgtt ? &to->ppgtt->vm : NULL;
__entry->dev = engine->i915->drm.primary->index; __entry->dev = engine->i915->drm.primary->index;
), ),
TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p", TP_printk("dev=%u, engine=%u:%u, ctx=%p, ctx_vm=%p",
__entry->dev, __entry->ring, __entry->to, __entry->vm) __entry->dev, __entry->class, __entry->instance, __entry->to,
__entry->vm)
); );
#endif /* _I915_TRACE_H_ */ #endif /* _I915_TRACE_H_ */
......
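For illustration, the reworked TP_printk formats above would render trace lines like the following (all values invented); the engine=%u:%u pair is uabi_class:instance, replacing the old single ring id:

i915_request_add: dev=0, engine=0:0, hw_id=4, ctx=21, seqno=2, global=0
i915_request_in: dev=0, engine=0:0, hw_id=4, ctx=21, seqno=2, prio=0, global=9, port=0
intel_engine_notify: dev=0, engine=0:0, seqno=9, waiters=1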
...@@ -105,7 +105,7 @@ static void vgt_deballoon_space(struct i915_ggtt *ggtt, ...@@ -105,7 +105,7 @@ static void vgt_deballoon_space(struct i915_ggtt *ggtt,
node->start + node->size, node->start + node->size,
node->size / 1024); node->size / 1024);
ggtt->base.reserved -= node->size; ggtt->vm.reserved -= node->size;
drm_mm_remove_node(node); drm_mm_remove_node(node);
} }
...@@ -141,11 +141,11 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt, ...@@ -141,11 +141,11 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n", DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
start, end, size / 1024); start, end, size / 1024);
ret = i915_gem_gtt_reserve(&ggtt->base, node, ret = i915_gem_gtt_reserve(&ggtt->vm, node,
size, start, I915_COLOR_UNEVICTABLE, size, start, I915_COLOR_UNEVICTABLE,
0); 0);
if (!ret) if (!ret)
ggtt->base.reserved += size; ggtt->vm.reserved += size;
return ret; return ret;
} }
...@@ -197,7 +197,7 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt, ...@@ -197,7 +197,7 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
int intel_vgt_balloon(struct drm_i915_private *dev_priv) int intel_vgt_balloon(struct drm_i915_private *dev_priv)
{ {
struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned long ggtt_end = ggtt->base.total; unsigned long ggtt_end = ggtt->vm.total;
unsigned long mappable_base, mappable_size, mappable_end; unsigned long mappable_base, mappable_size, mappable_end;
unsigned long unmappable_base, unmappable_size, unmappable_end; unsigned long unmappable_base, unmappable_size, unmappable_end;
......
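The ggtt->base to ggtt->vm change here recurs throughout the backmerge; the shape of the renamed embedding is assumed to be roughly:

/* Assumed layout behind the rename; unrelated fields elided. */
struct i915_ggtt {
	struct i915_address_space vm;	/* formerly named base */
	/* mappable aperture, fences, etc. */
};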
...@@ -36,6 +36,12 @@ intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv) ...@@ -36,6 +36,12 @@ intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv)
return dev_priv->vgpu.caps & VGT_CAPS_HWSP_EMULATION; return dev_priv->vgpu.caps & VGT_CAPS_HWSP_EMULATION;
} }
static inline bool
intel_vgpu_has_huge_gtt(struct drm_i915_private *dev_priv)
{
return dev_priv->vgpu.caps & VGT_CAPS_HUGE_GTT;
}
int intel_vgt_balloon(struct drm_i915_private *dev_priv); int intel_vgt_balloon(struct drm_i915_private *dev_priv);
void intel_vgt_deballoon(struct drm_i915_private *dev_priv); void intel_vgt_deballoon(struct drm_i915_private *dev_priv);
......
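A hedged sketch of how the new capability test might be consumed; intel_vgpu_active(), mkwrite_device_info() and I915_GTT_PAGE_SIZE_4K are assumed from the wider driver, not defined in this diff:

	/* Fall back to 4K GTT pages on vGPUs lacking VGT_CAPS_HUGE_GTT. */
	if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
		mkwrite_device_info(dev_priv)->page_sizes =
			I915_GTT_PAGE_SIZE_4K;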
...@@ -85,7 +85,7 @@ vma_create(struct drm_i915_gem_object *obj, ...@@ -85,7 +85,7 @@ vma_create(struct drm_i915_gem_object *obj,
int i; int i;
/* The aliasing_ppgtt should never be used directly! */ /* The aliasing_ppgtt should never be used directly! */
GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base); GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL); vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
if (vma == NULL) if (vma == NULL)
...@@ -459,6 +459,18 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level) ...@@ -459,6 +459,18 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
return true; return true;
} }
static void assert_bind_count(const struct drm_i915_gem_object *obj)
{
/*
* Combine the assertion that the object is bound and that we have
* pinned its pages. But we should never have bound the object
* more than we have pinned its pages. (For complete accuracy, we
* assume that no one else is pinning the pages, but as a rough assertion
* that we will not run into problems later, this will do!)
*/
GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
}
/** /**
* i915_vma_insert - finds a slot for the vma in its address space * i915_vma_insert - finds a slot for the vma in its address space
* @vma: the vma * @vma: the vma
...@@ -595,7 +607,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) ...@@ -595,7 +607,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
obj->bind_count++; obj->bind_count++;
spin_unlock(&dev_priv->mm.obj_lock); spin_unlock(&dev_priv->mm.obj_lock);
GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count); assert_bind_count(obj);
return 0; return 0;
...@@ -633,7 +645,7 @@ i915_vma_remove(struct i915_vma *vma) ...@@ -633,7 +645,7 @@ i915_vma_remove(struct i915_vma *vma)
* reaped by the shrinker. * reaped by the shrinker.
*/ */
i915_gem_object_unpin_pages(obj); i915_gem_object_unpin_pages(obj);
GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count); assert_bind_count(obj);
} }
int __i915_vma_do_pin(struct i915_vma *vma, int __i915_vma_do_pin(struct i915_vma *vma,
......
...@@ -267,8 +267,6 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, ...@@ -267,8 +267,6 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
if (!lvds_lfp_data_ptrs) if (!lvds_lfp_data_ptrs)
return; return;
dev_priv->vbt.lvds_vbt = 1;
panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data, panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
lvds_lfp_data_ptrs, lvds_lfp_data_ptrs,
panel_type); panel_type);
...@@ -518,8 +516,31 @@ parse_driver_features(struct drm_i915_private *dev_priv, ...@@ -518,8 +516,31 @@ parse_driver_features(struct drm_i915_private *dev_priv,
if (!driver) if (!driver)
return; return;
if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP) if (INTEL_GEN(dev_priv) >= 5) {
dev_priv->vbt.edp.support = 1; /*
* Note that we consider BDB_DRIVER_FEATURE_INT_SDVO_LVDS
* to mean "eDP". The VBT spec doesn't agree with that
* interpretation, but real world VBTs seem to.
*/
if (driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS)
dev_priv->vbt.int_lvds_support = 0;
} else {
/*
* FIXME it's not clear which BDB version has the LVDS config
* bits defined. Revision history in the VBT spec says:
* "0.92 | Add two definitions for VBT value of LVDS Active
* Config (00b and 11b values defined) | 06/13/2005"
* but does not specify the BDB version.
*
* So far version 134 (on i945gm) is the oldest VBT observed
* in the wild with the bits correctly populated. Version
* 108 (on i85x) does not have the bits correctly populated.
*/
if (bdb->version >= 134 &&
driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS &&
driver->lvds_config != BDB_DRIVER_FEATURE_INT_SDVO_LVDS)
dev_priv->vbt.int_lvds_support = 0;
}
DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled); DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled);
/* /*
...@@ -542,11 +563,8 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) ...@@ -542,11 +563,8 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
int panel_type = dev_priv->vbt.panel_type; int panel_type = dev_priv->vbt.panel_type;
edp = find_section(bdb, BDB_EDP); edp = find_section(bdb, BDB_EDP);
if (!edp) { if (!edp)
if (dev_priv->vbt.edp.support)
DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
return; return;
}
switch ((edp->color_depth >> (panel_type * 2)) & 3) { switch ((edp->color_depth >> (panel_type * 2)) & 3) {
case EDP_18BPP: case EDP_18BPP:
...@@ -688,8 +706,52 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) ...@@ -688,8 +706,52 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
break; break;
} }
dev_priv->vbt.psr.tp1_wakeup_time = psr_table->tp1_wakeup_time; /*
dev_priv->vbt.psr.tp2_tp3_wakeup_time = psr_table->tp2_tp3_wakeup_time; * New psr options 0=500us, 1=100us, 2=2500us, 3=0us
* Old decimal value is wake up time in multiples of 100 us.
*/
if (bdb->version >= 209 && IS_GEN9_BC(dev_priv)) {
switch (psr_table->tp1_wakeup_time) {
case 0:
dev_priv->vbt.psr.tp1_wakeup_time_us = 500;
break;
case 1:
dev_priv->vbt.psr.tp1_wakeup_time_us = 100;
break;
case 3:
dev_priv->vbt.psr.tp1_wakeup_time_us = 0;
break;
default:
DRM_DEBUG_KMS("VBT tp1 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
psr_table->tp1_wakeup_time);
/* fallthrough */
case 2:
dev_priv->vbt.psr.tp1_wakeup_time_us = 2500;
break;
}
switch (psr_table->tp2_tp3_wakeup_time) {
case 0:
dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 500;
break;
case 1:
dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 100;
break;
case 3:
dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 0;
break;
default:
DRM_DEBUG_KMS("VBT tp2_tp3 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
psr_table->tp2_tp3_wakeup_time);
/* fallthrough */
case 2:
dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 2500;
break;
}
} else {
dev_priv->vbt.psr.tp1_wakeup_time_us = psr_table->tp1_wakeup_time * 100;
dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100;
}
} }
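Both switches above decode the same two-bit VBT field; a hypothetical helper capturing that mapping (not part of this commit) would be:

/* Decode a VBT >= 209 PSR wakeup-time field into microseconds. */
static int vbt_psr_wakeup_time_us(u8 field)
{
	switch (field) {
	case 0:
		return 500;
	case 1:
		return 100;
	case 3:
		return 0;
	default:	/* out-of-range values fall back to the maximum */
	case 2:
		return 2500;
	}
}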
static void parse_dsi_backlight_ports(struct drm_i915_private *dev_priv, static void parse_dsi_backlight_ports(struct drm_i915_private *dev_priv,
...@@ -1197,18 +1259,37 @@ static const u8 cnp_ddc_pin_map[] = { ...@@ -1197,18 +1259,37 @@ static const u8 cnp_ddc_pin_map[] = {
[DDC_BUS_DDI_F] = GMBUS_PIN_3_BXT, /* sic */ [DDC_BUS_DDI_F] = GMBUS_PIN_3_BXT, /* sic */
}; };
static const u8 icp_ddc_pin_map[] = {
[ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
[ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
[ICL_DDC_BUS_PORT_1] = GMBUS_PIN_9_TC1_ICP,
[ICL_DDC_BUS_PORT_2] = GMBUS_PIN_10_TC2_ICP,
[ICL_DDC_BUS_PORT_3] = GMBUS_PIN_11_TC3_ICP,
[ICL_DDC_BUS_PORT_4] = GMBUS_PIN_12_TC4_ICP,
};
static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin) static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
{ {
if (HAS_PCH_CNP(dev_priv)) { const u8 *ddc_pin_map;
if (vbt_pin < ARRAY_SIZE(cnp_ddc_pin_map)) { int n_entries;
return cnp_ddc_pin_map[vbt_pin];
} else { if (HAS_PCH_ICP(dev_priv)) {
DRM_DEBUG_KMS("Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n", vbt_pin); ddc_pin_map = icp_ddc_pin_map;
return 0; n_entries = ARRAY_SIZE(icp_ddc_pin_map);
} } else if (HAS_PCH_CNP(dev_priv)) {
ddc_pin_map = cnp_ddc_pin_map;
n_entries = ARRAY_SIZE(cnp_ddc_pin_map);
} else {
/* Assuming direct map */
return vbt_pin;
} }
return vbt_pin; if (vbt_pin < n_entries && ddc_pin_map[vbt_pin] != 0)
return ddc_pin_map[vbt_pin];
DRM_DEBUG_KMS("Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n",
vbt_pin);
return 0;
} }
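A hypothetical caller, showing the contract that a zero return now uniformly means "no valid pin"; the child and info names are illustrative assumptions:

	u8 pin = map_ddc_pin(dev_priv, child->ddc_pin);

	if (!pin)
		return;	/* VBT named a DDC pin this platform lacks */

	info->alternate_ddc_pin = pin;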
static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
...@@ -1504,7 +1585,6 @@ init_vbt_defaults(struct drm_i915_private *dev_priv) ...@@ -1504,7 +1585,6 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
/* LFP panel data */ /* LFP panel data */
dev_priv->vbt.lvds_dither = 1; dev_priv->vbt.lvds_dither = 1;
dev_priv->vbt.lvds_vbt = 0;
/* SDVO panel data */ /* SDVO panel data */
dev_priv->vbt.sdvo_lvds_vbt_mode = NULL; dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
...@@ -1513,6 +1593,9 @@ init_vbt_defaults(struct drm_i915_private *dev_priv) ...@@ -1513,6 +1593,9 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
dev_priv->vbt.int_tv_support = 1; dev_priv->vbt.int_tv_support = 1;
dev_priv->vbt.int_crt_support = 1; dev_priv->vbt.int_crt_support = 1;
/* driver features */
dev_priv->vbt.int_lvds_support = 1;
/* Default to using SSC */ /* Default to using SSC */
dev_priv->vbt.lvds_use_ssc = 1; dev_priv->vbt.lvds_use_ssc = 1;
/* /*
......
...@@ -846,8 +846,9 @@ static void cancel_fake_irq(struct intel_engine_cs *engine) ...@@ -846,8 +846,9 @@ static void cancel_fake_irq(struct intel_engine_cs *engine)
void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine) void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
{ {
struct intel_breadcrumbs *b = &engine->breadcrumbs; struct intel_breadcrumbs *b = &engine->breadcrumbs;
unsigned long flags;
spin_lock_irq(&b->irq_lock); spin_lock_irqsave(&b->irq_lock, flags);
/* /*
* Leave the fake_irq timer enabled (if it is running), but clear the * Leave the fake_irq timer enabled (if it is running), but clear the
...@@ -871,7 +872,7 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine) ...@@ -871,7 +872,7 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
*/ */
clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted); clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
spin_unlock_irq(&b->irq_lock); spin_unlock_irqrestore(&b->irq_lock, flags);
} }
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine) void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
......
...@@ -63,33 +63,35 @@ static struct intel_crt *intel_attached_crt(struct drm_connector *connector) ...@@ -63,33 +63,35 @@ static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
return intel_encoder_to_crt(intel_attached_encoder(connector)); return intel_encoder_to_crt(intel_attached_encoder(connector));
} }
bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
i915_reg_t adpa_reg, enum pipe *pipe)
{
u32 val;
val = I915_READ(adpa_reg);
/* asserts want to know the pipe even if the port is disabled */
if (HAS_PCH_CPT(dev_priv))
*pipe = (val & ADPA_PIPE_SEL_MASK_CPT) >> ADPA_PIPE_SEL_SHIFT_CPT;
else
*pipe = (val & ADPA_PIPE_SEL_MASK) >> ADPA_PIPE_SEL_SHIFT;
return val & ADPA_DAC_ENABLE;
}
static bool intel_crt_get_hw_state(struct intel_encoder *encoder, static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe) enum pipe *pipe)
{ {
struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crt *crt = intel_encoder_to_crt(encoder); struct intel_crt *crt = intel_encoder_to_crt(encoder);
u32 tmp;
bool ret; bool ret;
if (!intel_display_power_get_if_enabled(dev_priv, if (!intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain)) encoder->power_domain))
return false; return false;
ret = false; ret = intel_crt_port_enabled(dev_priv, crt->adpa_reg, pipe);
tmp = I915_READ(crt->adpa_reg);
if (!(tmp & ADPA_DAC_ENABLE))
goto out;
if (HAS_PCH_CPT(dev_priv))
*pipe = PORT_TO_PIPE_CPT(tmp);
else
*pipe = PORT_TO_PIPE(tmp);
ret = true;
out:
intel_display_power_put(dev_priv, encoder->power_domain); intel_display_power_put(dev_priv, encoder->power_domain);
return ret; return ret;
...@@ -168,11 +170,9 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, ...@@ -168,11 +170,9 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
if (HAS_PCH_LPT(dev_priv)) if (HAS_PCH_LPT(dev_priv))
; /* Those bits don't exist here */ ; /* Those bits don't exist here */
else if (HAS_PCH_CPT(dev_priv)) else if (HAS_PCH_CPT(dev_priv))
adpa |= PORT_TRANS_SEL_CPT(crtc->pipe); adpa |= ADPA_PIPE_SEL_CPT(crtc->pipe);
else if (crtc->pipe == 0)
adpa |= ADPA_PIPE_A_SELECT;
else else
adpa |= ADPA_PIPE_B_SELECT; adpa |= ADPA_PIPE_SEL(crtc->pipe);
if (!HAS_PCH_SPLIT(dev_priv)) if (!HAS_PCH_SPLIT(dev_priv))
I915_WRITE(BCLRPAT(crtc->pipe), 0); I915_WRITE(BCLRPAT(crtc->pipe), 0);
......
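The ADPA_PIPE_SEL*() helpers used above are new in this series; their bit definitions are assumed to look roughly like this (this editor's reconstruction, verify against i915_reg.h):

/* Assumed register-bit layout; illustrative only. */
#define ADPA_PIPE_SEL_SHIFT		30
#define ADPA_PIPE_SEL_MASK		(1 << 30)
#define ADPA_PIPE_SEL(pipe)		((pipe) << 30)
#define ADPA_PIPE_SEL_SHIFT_CPT		29
#define ADPA_PIPE_SEL_MASK_CPT		(3 << 29)
#define ADPA_PIPE_SEL_CPT(pipe)		((pipe) << 29)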
...@@ -1243,35 +1243,6 @@ intel_ddi_get_crtc_encoder(struct intel_crtc *crtc) ...@@ -1243,35 +1243,6 @@ intel_ddi_get_crtc_encoder(struct intel_crtc *crtc)
return ret; return ret;
} }
/* Finds the only possible encoder associated with the given CRTC. */
struct intel_encoder *
intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct intel_encoder *ret = NULL;
struct drm_atomic_state *state;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
int num_encoders = 0;
int i;
state = crtc_state->base.state;
for_each_new_connector_in_state(state, connector, connector_state, i) {
if (connector_state->crtc != crtc_state->base.crtc)
continue;
ret = to_intel_encoder(connector_state->best_encoder);
num_encoders++;
}
WARN(num_encoders != 1, "%d encoders on crtc for pipe %c\n", num_encoders,
pipe_name(crtc->pipe));
BUG_ON(ret == NULL);
return ret;
}
#define LC_FREQ 2700 #define LC_FREQ 2700
static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
...@@ -1374,8 +1345,13 @@ static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv, ...@@ -1374,8 +1345,13 @@ static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
uint32_t cfgcr0, cfgcr1; uint32_t cfgcr0, cfgcr1;
uint32_t p0, p1, p2, dco_freq, ref_clock; uint32_t p0, p1, p2, dco_freq, ref_clock;
cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id)); if (INTEL_GEN(dev_priv) >= 11) {
cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(pll_id)); cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(pll_id));
cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(pll_id));
} else {
cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id));
cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(pll_id));
}
p0 = cfgcr1 & DPLL_CFGCR1_PDIV_MASK; p0 = cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
p2 = cfgcr1 & DPLL_CFGCR1_KDIV_MASK; p2 = cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
...@@ -1451,6 +1427,30 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config) ...@@ -1451,6 +1427,30 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
pipe_config->base.adjusted_mode.crtc_clock = dotclock; pipe_config->base.adjusted_mode.crtc_clock = dotclock;
} }
static void icl_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
int link_clock = 0;
uint32_t pll_id;
pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
if (port == PORT_A || port == PORT_B) {
if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
link_clock = cnl_calc_wrpll_link(dev_priv, pll_id);
else
link_clock = icl_calc_dp_combo_pll_link(dev_priv,
pll_id);
} else {
/* FIXME - Add for MG PLL */
WARN(1, "MG PLL clock_get code not implemented yet\n");
}
pipe_config->port_clock = link_clock;
ddi_dotclock_get(pipe_config);
}
static void cnl_ddi_clock_get(struct intel_encoder *encoder, static void cnl_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config) struct intel_crtc_state *pipe_config)
{ {
...@@ -1644,6 +1644,8 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder, ...@@ -1644,6 +1644,8 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder,
bxt_ddi_clock_get(encoder, pipe_config); bxt_ddi_clock_get(encoder, pipe_config);
else if (IS_CANNONLAKE(dev_priv)) else if (IS_CANNONLAKE(dev_priv))
cnl_ddi_clock_get(encoder, pipe_config); cnl_ddi_clock_get(encoder, pipe_config);
else if (IS_ICELAKE(dev_priv))
icl_ddi_clock_get(encoder, pipe_config);
} }
void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state) void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
...@@ -2115,6 +2117,26 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder) ...@@ -2115,6 +2117,26 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
DP_TRAIN_VOLTAGE_SWING_MASK; DP_TRAIN_VOLTAGE_SWING_MASK;
} }
/*
* We assume that the full set of pre-emphasis values can be
* used on all DDI platforms. Should that change we need to
* rethink this code.
*/
u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder, u8 voltage_swing)
{
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_3;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
return DP_TRAIN_PRE_EMPH_LEVEL_2;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
return DP_TRAIN_PRE_EMPH_LEVEL_1;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
default:
return DP_TRAIN_PRE_EMPH_LEVEL_0;
}
}
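A hypothetical link-training caller, clamping a requested pre-emphasis level to the platform maximum; encoder, voltage_swing and preemph are assumed locals:

	u8 max_pe = intel_ddi_dp_pre_emphasis_max(encoder, voltage_swing);

	if (preemph > max_pe)
		preemph = max_pe;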
static void cnl_ddi_vswing_program(struct intel_encoder *encoder, static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
int level, enum intel_output_type type) int level, enum intel_output_type type)
{ {
...@@ -2453,12 +2475,13 @@ void icl_map_plls_to_ports(struct drm_crtc *crtc, ...@@ -2453,12 +2475,13 @@ void icl_map_plls_to_ports(struct drm_crtc *crtc,
for_each_new_connector_in_state(old_state, conn, conn_state, i) { for_each_new_connector_in_state(old_state, conn, conn_state, i) {
struct intel_encoder *encoder = struct intel_encoder *encoder =
to_intel_encoder(conn_state->best_encoder); to_intel_encoder(conn_state->best_encoder);
enum port port = encoder->port; enum port port;
uint32_t val; uint32_t val;
if (conn_state->crtc != crtc) if (conn_state->crtc != crtc)
continue; continue;
port = encoder->port;
mutex_lock(&dev_priv->dpll_lock); mutex_lock(&dev_priv->dpll_lock);
val = I915_READ(DPCLKA_CFGCR0_ICL); val = I915_READ(DPCLKA_CFGCR0_ICL);
...@@ -2490,11 +2513,12 @@ void icl_unmap_plls_to_ports(struct drm_crtc *crtc, ...@@ -2490,11 +2513,12 @@ void icl_unmap_plls_to_ports(struct drm_crtc *crtc,
for_each_old_connector_in_state(old_state, conn, old_conn_state, i) { for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
struct intel_encoder *encoder = struct intel_encoder *encoder =
to_intel_encoder(old_conn_state->best_encoder); to_intel_encoder(old_conn_state->best_encoder);
enum port port = encoder->port; enum port port;
if (old_conn_state->crtc != crtc) if (old_conn_state->crtc != crtc)
continue; continue;
port = encoder->port;
mutex_lock(&dev_priv->dpll_lock); mutex_lock(&dev_priv->dpll_lock);
I915_WRITE(DPCLKA_CFGCR0_ICL, I915_WRITE(DPCLKA_CFGCR0_ICL,
I915_READ(DPCLKA_CFGCR0_ICL) | I915_READ(DPCLKA_CFGCR0_ICL) |
......
...@@ -126,6 +126,17 @@ enum port { ...@@ -126,6 +126,17 @@ enum port {
#define port_name(p) ((p) + 'A') #define port_name(p) ((p) + 'A')
enum tc_port {
PORT_TC_NONE = -1,
PORT_TC1 = 0,
PORT_TC2,
PORT_TC3,
PORT_TC4,
I915_MAX_TC_PORTS
};
enum dpio_channel { enum dpio_channel {
DPIO_CH0, DPIO_CH0,
DPIO_CH1 DPIO_CH1
......
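A minimal sketch of the port-to-TC mapping the new enum enables, assuming ICL where ports C..F are the Type-C ports; the committed intel_port_to_tc() (declared later in this diff) may differ:

enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
{
	if (!intel_port_is_tc(dev_priv, port))
		return PORT_TC_NONE;

	return PORT_TC1 + port - PORT_C;
}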
This diff has been collapsed.
...@@ -2525,6 +2525,76 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state, ...@@ -2525,6 +2525,76 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
return true; return true;
} }
int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
uint32_t pll_id)
{
uint32_t cfgcr0, cfgcr1;
uint32_t pdiv, kdiv, qdiv_mode, qdiv_ratio, dco_integer, dco_fraction;
const struct skl_wrpll_params *params;
int index, n_entries, link_clock;
/* Read back values from DPLL CFGCR registers */
cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(pll_id));
cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(pll_id));
dco_integer = cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK;
dco_fraction = (cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
DPLL_CFGCR0_DCO_FRACTION_SHIFT;
pdiv = (cfgcr1 & DPLL_CFGCR1_PDIV_MASK) >> DPLL_CFGCR1_PDIV_SHIFT;
kdiv = (cfgcr1 & DPLL_CFGCR1_KDIV_MASK) >> DPLL_CFGCR1_KDIV_SHIFT;
qdiv_mode = (cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1)) >>
DPLL_CFGCR1_QDIV_MODE_SHIFT;
qdiv_ratio = (cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
DPLL_CFGCR1_QDIV_RATIO_SHIFT;
params = dev_priv->cdclk.hw.ref == 24000 ?
icl_dp_combo_pll_24MHz_values :
icl_dp_combo_pll_19_2MHz_values;
n_entries = ARRAY_SIZE(icl_dp_combo_pll_24MHz_values);
for (index = 0; index < n_entries; index++) {
if (dco_integer == params[index].dco_integer &&
dco_fraction == params[index].dco_fraction &&
pdiv == params[index].pdiv &&
kdiv == params[index].kdiv &&
qdiv_mode == params[index].qdiv_mode &&
qdiv_ratio == params[index].qdiv_ratio)
break;
}
/* Map PLL Index to Link Clock */
switch (index) {
default:
MISSING_CASE(index);
case 0:
link_clock = 540000;
break;
case 1:
link_clock = 270000;
break;
case 2:
link_clock = 162000;
break;
case 3:
link_clock = 324000;
break;
case 4:
link_clock = 216000;
break;
case 5:
link_clock = 432000;
break;
case 6:
link_clock = 648000;
break;
case 7:
link_clock = 810000;
break;
}
return link_clock;
}
static enum port icl_mg_pll_id_to_port(enum intel_dpll_id id) static enum port icl_mg_pll_id_to_port(enum intel_dpll_id id)
{ {
return id - DPLL_ID_ICL_MGPLL1 + PORT_C; return id - DPLL_ID_ICL_MGPLL1 + PORT_C;
......
...@@ -336,5 +336,7 @@ void intel_shared_dpll_init(struct drm_device *dev); ...@@ -336,5 +336,7 @@ void intel_shared_dpll_init(struct drm_device *dev);
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv, void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
struct intel_dpll_hw_state *hw_state); struct intel_dpll_hw_state *hw_state);
int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
uint32_t pll_id);
#endif /* _INTEL_DPLL_MGR_H_ */ #endif /* _INTEL_DPLL_MGR_H_ */
...@@ -194,7 +194,6 @@ enum intel_output_type { ...@@ -194,7 +194,6 @@ enum intel_output_type {
struct intel_framebuffer { struct intel_framebuffer {
struct drm_framebuffer base; struct drm_framebuffer base;
struct drm_i915_gem_object *obj;
struct intel_rotation_info rot_info; struct intel_rotation_info rot_info;
/* for each plane in the normal GTT view */ /* for each plane in the normal GTT view */
...@@ -277,7 +276,6 @@ struct intel_encoder { ...@@ -277,7 +276,6 @@ struct intel_encoder {
struct intel_panel { struct intel_panel {
struct drm_display_mode *fixed_mode; struct drm_display_mode *fixed_mode;
struct drm_display_mode *alt_fixed_mode;
struct drm_display_mode *downclock_mode; struct drm_display_mode *downclock_mode;
/* backlight */ /* backlight */
...@@ -972,7 +970,7 @@ struct intel_plane { ...@@ -972,7 +970,7 @@ struct intel_plane {
const struct intel_plane_state *plane_state); const struct intel_plane_state *plane_state);
void (*disable_plane)(struct intel_plane *plane, void (*disable_plane)(struct intel_plane *plane,
struct intel_crtc *crtc); struct intel_crtc *crtc);
bool (*get_hw_state)(struct intel_plane *plane); bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe);
int (*check_plane)(struct intel_plane *plane, int (*check_plane)(struct intel_plane *plane,
struct intel_crtc_state *crtc_state, struct intel_crtc_state *crtc_state,
struct intel_plane_state *state); struct intel_plane_state *state);
...@@ -1005,7 +1003,7 @@ struct cxsr_latency { ...@@ -1005,7 +1003,7 @@ struct cxsr_latency {
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
#define to_intel_plane(x) container_of(x, struct intel_plane, base) #define to_intel_plane(x) container_of(x, struct intel_plane, base)
#define to_intel_plane_state(x) container_of(x, struct intel_plane_state, base) #define to_intel_plane_state(x) container_of(x, struct intel_plane_state, base)
#define intel_fb_obj(x) (x ? to_intel_framebuffer(x)->obj : NULL) #define intel_fb_obj(x) ((x) ? to_intel_bo((x)->obj[0]) : NULL)
struct intel_hdmi { struct intel_hdmi {
i915_reg_t hdmi_reg; i915_reg_t hdmi_reg;
...@@ -1377,6 +1375,8 @@ void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv); ...@@ -1377,6 +1375,8 @@ void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv);
void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv); void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv);
/* intel_crt.c */ /* intel_crt.c */
bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
i915_reg_t adpa_reg, enum pipe *pipe);
void intel_crt_init(struct drm_i915_private *dev_priv); void intel_crt_init(struct drm_i915_private *dev_priv);
void intel_crt_reset(struct drm_encoder *encoder); void intel_crt_reset(struct drm_encoder *encoder);
...@@ -1393,8 +1393,6 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv, ...@@ -1393,8 +1393,6 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder); enum transcoder cpu_transcoder);
void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state); void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state); void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
struct intel_encoder *
intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state);
void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state); void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state);
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp); void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector); bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
...@@ -1408,6 +1406,8 @@ void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv, ...@@ -1408,6 +1406,8 @@ void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
u32 bxt_signal_levels(struct intel_dp *intel_dp); u32 bxt_signal_levels(struct intel_dp *intel_dp);
uint32_t ddi_signal_levels(struct intel_dp *intel_dp); uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder); u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder);
u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder,
u8 voltage_swing);
int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder, int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
bool enable); bool enable);
void icl_map_plls_to_ports(struct drm_crtc *crtc, void icl_map_plls_to_ports(struct drm_crtc *crtc,
...@@ -1489,6 +1489,9 @@ void intel_connector_attach_encoder(struct intel_connector *connector, ...@@ -1489,6 +1489,9 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
struct intel_encoder *encoder); struct intel_encoder *encoder);
struct drm_display_mode * struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder); intel_encoder_current_mode(struct intel_encoder *encoder);
bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port);
enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
enum port port);
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector); enum pipe intel_get_pipe_from_connector(struct intel_connector *connector);
int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
...@@ -1616,6 +1619,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode, ...@@ -1616,6 +1619,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state); struct intel_crtc_state *crtc_state);
u16 skl_scaler_calc_phase(int sub, bool chroma_center);
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state); int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
uint32_t pixel_format); uint32_t pixel_format);
...@@ -1645,6 +1649,9 @@ void intel_csr_ucode_suspend(struct drm_i915_private *); ...@@ -1645,6 +1649,9 @@ void intel_csr_ucode_suspend(struct drm_i915_private *);
void intel_csr_ucode_resume(struct drm_i915_private *); void intel_csr_ucode_resume(struct drm_i915_private *);
/* intel_dp.c */ /* intel_dp.c */
bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
i915_reg_t dp_reg, enum port port,
enum pipe *pipe);
bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg, bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg,
enum port port); enum port port);
bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port, bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
...@@ -1822,6 +1829,8 @@ void intel_infoframe_init(struct intel_digital_port *intel_dig_port); ...@@ -1822,6 +1829,8 @@ void intel_infoframe_init(struct intel_digital_port *intel_dig_port);
/* intel_lvds.c */ /* intel_lvds.c */
bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
i915_reg_t lvds_reg, enum pipe *pipe);
void intel_lvds_init(struct drm_i915_private *dev_priv); void intel_lvds_init(struct drm_i915_private *dev_priv);
struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev); struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev);
bool intel_is_dual_link_lvds(struct drm_device *dev); bool intel_is_dual_link_lvds(struct drm_device *dev);
...@@ -1850,7 +1859,6 @@ void intel_overlay_reset(struct drm_i915_private *dev_priv); ...@@ -1850,7 +1859,6 @@ void intel_overlay_reset(struct drm_i915_private *dev_priv);
/* intel_panel.c */ /* intel_panel.c */
int intel_panel_init(struct intel_panel *panel, int intel_panel_init(struct intel_panel *panel,
struct drm_display_mode *fixed_mode, struct drm_display_mode *fixed_mode,
struct drm_display_mode *alt_fixed_mode,
struct drm_display_mode *downclock_mode); struct drm_display_mode *downclock_mode);
void intel_panel_fini(struct intel_panel *panel); void intel_panel_fini(struct intel_panel *panel);
void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode, void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
...@@ -1913,8 +1921,6 @@ void intel_psr_flush(struct drm_i915_private *dev_priv, ...@@ -1913,8 +1921,6 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits, unsigned frontbuffer_bits,
enum fb_op_origin origin); enum fb_op_origin origin);
void intel_psr_init(struct drm_i915_private *dev_priv); void intel_psr_init(struct drm_i915_private *dev_priv);
void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits);
void intel_psr_compute_config(struct intel_dp *intel_dp, void intel_psr_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state); struct intel_crtc_state *crtc_state);
void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug); void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug);
...@@ -2060,6 +2066,8 @@ void intel_init_ipc(struct drm_i915_private *dev_priv); ...@@ -2060,6 +2066,8 @@ void intel_init_ipc(struct drm_i915_private *dev_priv);
void intel_enable_ipc(struct drm_i915_private *dev_priv); void intel_enable_ipc(struct drm_i915_private *dev_priv);
/* intel_sdvo.c */ /* intel_sdvo.c */
bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
i915_reg_t sdvo_reg, enum pipe *pipe);
bool intel_sdvo_init(struct drm_i915_private *dev_priv, bool intel_sdvo_init(struct drm_i915_private *dev_priv,
i915_reg_t reg, enum port port); i915_reg_t reg, enum port port);
...@@ -2078,7 +2086,7 @@ void skl_update_plane(struct intel_plane *plane, ...@@ -2078,7 +2086,7 @@ void skl_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state, const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state); const struct intel_plane_state *plane_state);
void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc); void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc);
bool skl_plane_get_hw_state(struct intel_plane *plane); bool skl_plane_get_hw_state(struct intel_plane *plane, enum pipe *pipe);
bool skl_plane_has_ccs(struct drm_i915_private *dev_priv, bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id); enum pipe pipe, enum plane_id plane_id);
bool intel_format_is_yuv(uint32_t format); bool intel_format_is_yuv(uint32_t format);
......
...@@ -1665,16 +1665,16 @@ static int intel_dsi_get_panel_orientation(struct intel_connector *connector) ...@@ -1665,16 +1665,16 @@ static int intel_dsi_get_panel_orientation(struct intel_connector *connector)
{ {
struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
int orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL; int orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
enum i9xx_plane_id plane; enum i9xx_plane_id i9xx_plane;
u32 val; u32 val;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
if (connector->encoder->crtc_mask == BIT(PIPE_B)) if (connector->encoder->crtc_mask == BIT(PIPE_B))
plane = PLANE_B; i9xx_plane = PLANE_B;
else else
plane = PLANE_A; i9xx_plane = PLANE_A;
val = I915_READ(DSPCNTR(plane)); val = I915_READ(DSPCNTR(i9xx_plane));
if (val & DISPPLANE_ROTATE_180) if (val & DISPPLANE_ROTATE_180)
orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP; orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
} }
...@@ -1846,7 +1846,7 @@ void intel_dsi_init(struct drm_i915_private *dev_priv) ...@@ -1846,7 +1846,7 @@ void intel_dsi_init(struct drm_i915_private *dev_priv)
connector->display_info.width_mm = fixed_mode->width_mm; connector->display_info.width_mm = fixed_mode->width_mm;
connector->display_info.height_mm = fixed_mode->height_mm; connector->display_info.height_mm = fixed_mode->height_mm;
intel_panel_init(&intel_connector->panel, fixed_mode, NULL, NULL); intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
intel_panel_setup_backlight(connector, INVALID_PIPE); intel_panel_setup_backlight(connector, INVALID_PIPE);
intel_dsi_add_properties(intel_connector); intel_dsi_add_properties(intel_connector);
......
...@@ -137,19 +137,15 @@ static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector) ...@@ -137,19 +137,15 @@ static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
static bool intel_dvo_get_hw_state(struct intel_encoder *encoder, static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe) enum pipe *pipe)
{ {
struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder); struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 tmp; u32 tmp;
tmp = I915_READ(intel_dvo->dev.dvo_reg); tmp = I915_READ(intel_dvo->dev.dvo_reg);
if (!(tmp & DVO_ENABLE)) *pipe = (tmp & DVO_PIPE_SEL_MASK) >> DVO_PIPE_SEL_SHIFT;
return false;
*pipe = PORT_TO_PIPE(tmp);
return true; return tmp & DVO_ENABLE;
} }
static void intel_dvo_get_config(struct intel_encoder *encoder, static void intel_dvo_get_config(struct intel_encoder *encoder,
...@@ -276,8 +272,7 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder, ...@@ -276,8 +272,7 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder,
dvo_val |= DVO_DATA_ORDER_FP | DVO_BORDER_ENABLE | dvo_val |= DVO_DATA_ORDER_FP | DVO_BORDER_ENABLE |
DVO_BLANK_ACTIVE_HIGH; DVO_BLANK_ACTIVE_HIGH;
if (pipe == 1) dvo_val |= DVO_PIPE_SEL(pipe);
dvo_val |= DVO_PIPE_B_SELECT;
dvo_val |= DVO_PIPE_STALL; dvo_val |= DVO_PIPE_STALL;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
dvo_val |= DVO_HSYNC_ACTIVE_HIGH; dvo_val |= DVO_HSYNC_ACTIVE_HIGH;
...@@ -536,7 +531,7 @@ void intel_dvo_init(struct drm_i915_private *dev_priv) ...@@ -536,7 +531,7 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
*/ */
intel_panel_init(&intel_connector->panel, intel_panel_init(&intel_connector->panel,
intel_dvo_get_current_mode(intel_encoder), intel_dvo_get_current_mode(intel_encoder),
NULL, NULL); NULL);
intel_dvo->panel_wants_dither = true; intel_dvo->panel_wants_dither = true;
} }
......
...@@ -47,7 +47,7 @@ ...@@ -47,7 +47,7 @@
static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev) static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
{ {
struct drm_i915_gem_object *obj = ifbdev->fb->obj; struct drm_i915_gem_object *obj = intel_fb_obj(&ifbdev->fb->base);
unsigned int origin = unsigned int origin =
ifbdev->vma_flags & PLANE_HAS_FENCE ? ORIGIN_GTT : ORIGIN_CPU; ifbdev->vma_flags & PLANE_HAS_FENCE ? ORIGIN_GTT : ORIGIN_CPU;
...@@ -193,7 +193,7 @@ static int intelfb_create(struct drm_fb_helper *helper, ...@@ -193,7 +193,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
drm_framebuffer_put(&intel_fb->base); drm_framebuffer_put(&intel_fb->base);
intel_fb = ifbdev->fb = NULL; intel_fb = ifbdev->fb = NULL;
} }
if (!intel_fb || WARN_ON(!intel_fb->obj)) { if (!intel_fb || WARN_ON(!intel_fb_obj(&intel_fb->base))) {
DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n"); DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
ret = intelfb_alloc(helper, sizes); ret = intelfb_alloc(helper, sizes);
if (ret) if (ret)
...@@ -265,7 +265,7 @@ static int intelfb_create(struct drm_fb_helper *helper, ...@@ -265,7 +265,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
* If the object is stolen however, it will be full of whatever * If the object is stolen however, it will be full of whatever
* garbage was left in there. * garbage was left in there.
*/ */
if (intel_fb->obj->stolen && !prealloc) if (intel_fb_obj(fb)->stolen && !prealloc)
memset_io(info->screen_base, 0, info->screen_size); memset_io(info->screen_base, 0, info->screen_size);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
...@@ -792,7 +792,8 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous ...@@ -792,7 +792,8 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
* been restored from swap. If the object is stolen however, it will be * been restored from swap. If the object is stolen however, it will be
* full of whatever garbage was left in there. * full of whatever garbage was left in there.
*/ */
if (state == FBINFO_STATE_RUNNING && ifbdev->fb->obj->stolen) if (state == FBINFO_STATE_RUNNING &&
intel_fb_obj(&ifbdev->fb->base)->stolen)
memset_io(info->screen_base, 0, info->screen_size); memset_io(info->screen_base, 0, info->screen_size);
drm_fb_helper_set_suspend(&ifbdev->helper, state); drm_fb_helper_set_suspend(&ifbdev->helper, state);
......
...@@ -153,8 +153,6 @@ void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv, ...@@ -153,8 +153,6 @@ void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
/* Remove stale busy bits due to the old buffer. */ /* Remove stale busy bits due to the old buffer. */
dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits; dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
spin_unlock(&dev_priv->fb_tracking.lock); spin_unlock(&dev_priv->fb_tracking.lock);
intel_psr_single_frame_update(dev_priv, frontbuffer_bits);
} }
/** /**
......
...@@ -346,10 +346,8 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len, ...@@ -346,10 +346,8 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
ret = -EIO; ret = -EIO;
if (ret) { if (ret) {
DRM_DEBUG_DRIVER("INTEL_GUC_SEND: Action 0x%X failed;" DRM_ERROR("MMIO: GuC action %#x failed with error %d %#x\n",
" ret=%d status=0x%08X response=0x%08X\n", action[0], ret, status);
action[0], ret, status,
I915_READ(SOFT_SCRATCH(15)));
goto out; goto out;
} }
...@@ -572,7 +570,7 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size) ...@@ -572,7 +570,7 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
if (IS_ERR(obj)) if (IS_ERR(obj))
return ERR_CAST(obj); return ERR_CAST(obj);
vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL); vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL);
if (IS_ERR(vma)) if (IS_ERR(vma))
goto err; goto err;
......
...@@ -1161,33 +1161,16 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder, ...@@ -1161,33 +1161,16 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder,
static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder, static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe) enum pipe *pipe)
{ {
struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
u32 tmp;
bool ret; bool ret;
if (!intel_display_power_get_if_enabled(dev_priv, if (!intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain)) encoder->power_domain))
return false; return false;
ret = false; ret = intel_sdvo_port_enabled(dev_priv, intel_hdmi->hdmi_reg, pipe);
tmp = I915_READ(intel_hdmi->hdmi_reg);
if (!(tmp & SDVO_ENABLE))
goto out;
if (HAS_PCH_CPT(dev_priv))
*pipe = PORT_TO_PIPE_CPT(tmp);
else if (IS_CHERRYVIEW(dev_priv))
*pipe = SDVO_PORT_TO_PIPE_CHV(tmp);
else
*pipe = PORT_TO_PIPE(tmp);
ret = true;
out:
intel_display_power_put(dev_priv, encoder->power_domain); intel_display_power_put(dev_priv, encoder->power_domain);
return ret; return ret;
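The open-coded readback is replaced by intel_sdvo_port_enabled(), a helper added elsewhere in this series; its body is not part of this diff. A plausible reconstruction from the logic deleted above (using the same kernel identifiers the deleted lines used; not the actual helper source):

/* Sketch: read the SDVO/HDMI control register, decode the selected pipe,
 * and return whether the port is enabled. Unlike the deleted code, the
 * pipe is reported even when the enable bit is clear. */
static bool sdvo_port_enabled_sketch(struct drm_i915_private *dev_priv,
				     i915_reg_t sdvo_reg, enum pipe *pipe)
{
	u32 val = I915_READ(sdvo_reg);

	if (HAS_PCH_CPT(dev_priv))
		*pipe = PORT_TO_PIPE_CPT(val);
	else if (IS_CHERRYVIEW(dev_priv))
		*pipe = SDVO_PORT_TO_PIPE_CHV(val);
	else
		*pipe = PORT_TO_PIPE(val);

	return val & SDVO_ENABLE;
}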
...@@ -1421,8 +1404,8 @@ static void intel_disable_hdmi(struct intel_encoder *encoder, ...@@ -1421,8 +1404,8 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false); intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false); intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
temp &= ~SDVO_PIPE_B_SELECT; temp &= ~SDVO_PIPE_SEL_MASK;
temp |= SDVO_ENABLE; temp |= SDVO_ENABLE | SDVO_PIPE_SEL(PIPE_A);
/* /*
* HW workaround, need to write this twice for issue * HW workaround, need to write this twice for issue
* that may result in first write getting masked. * that may result in first write getting masked.
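The old code could only clear the single pipe-B select bit; the new SDVO_PIPE_SEL_MASK / SDVO_PIPE_SEL() pair covers the whole field. A standalone illustration of the mask/select idiom (shift and field width here are placeholders, not the real register layout):

#define PIPE_SEL_SHIFT	30
#define PIPE_SEL_MASK	(3u << PIPE_SEL_SHIFT)			/* whole select field */
#define PIPE_SEL(pipe)	((unsigned int)(pipe) << PIPE_SEL_SHIFT)

/* Route the port to pipe A: clear every select bit, then set the field. */
static unsigned int select_pipe_a(unsigned int temp)
{
	temp &= ~PIPE_SEL_MASK;
	temp |= PIPE_SEL(0);	/* PIPE_A == 0 */
	return temp;
}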
......
(This file's diff is collapsed and not shown.)
...@@ -104,11 +104,4 @@ struct i915_gem_context; ...@@ -104,11 +104,4 @@ struct i915_gem_context;
void intel_lr_context_resume(struct drm_i915_private *dev_priv); void intel_lr_context_resume(struct drm_i915_private *dev_priv);
static inline uint64_t
intel_lr_context_descriptor(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
return to_intel_context(ctx, engine)->lrc_desc;
}
#endif /* _INTEL_LRC_H_ */ #endif /* _INTEL_LRC_H_ */
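With the inline wrapper gone, callers read the descriptor straight from the per-engine context returned by to_intel_context(). A standalone model of that lookup, with stand-in types in place of the kernel's:

#include <stdint.h>

struct intel_context_model {
	uint64_t lrc_desc;	/* hardware context descriptor */
};

struct gem_context_model {
	struct intel_context_model engine_ctx[4];	/* one per engine */
};

/* What call sites do now, instead of intel_lr_context_descriptor(). */
static uint64_t context_descriptor(struct gem_context_model *ctx, int engine_id)
{
	return ctx->engine_ctx[engine_id].lrc_desc;
}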
(This file's diff is collapsed and not shown.)
...@@ -1928,13 +1928,11 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel) ...@@ -1928,13 +1928,11 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
int intel_panel_init(struct intel_panel *panel, int intel_panel_init(struct intel_panel *panel,
struct drm_display_mode *fixed_mode, struct drm_display_mode *fixed_mode,
struct drm_display_mode *alt_fixed_mode,
struct drm_display_mode *downclock_mode) struct drm_display_mode *downclock_mode)
{ {
intel_panel_init_backlight_funcs(panel); intel_panel_init_backlight_funcs(panel);
panel->fixed_mode = fixed_mode; panel->fixed_mode = fixed_mode;
panel->alt_fixed_mode = alt_fixed_mode;
panel->downclock_mode = downclock_mode; panel->downclock_mode = downclock_mode;
return 0; return 0;
...@@ -1948,10 +1946,6 @@ void intel_panel_fini(struct intel_panel *panel) ...@@ -1948,10 +1946,6 @@ void intel_panel_fini(struct intel_panel *panel)
if (panel->fixed_mode) if (panel->fixed_mode)
drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode); drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
if (panel->alt_fixed_mode)
drm_mode_destroy(intel_connector->base.dev,
panel->alt_fixed_mode);
if (panel->downclock_mode) if (panel->downclock_mode)
drm_mode_destroy(intel_connector->base.dev, drm_mode_destroy(intel_connector->base.dev,
panel->downclock_mode); panel->downclock_mode);
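Dropping alt_fixed_mode changes the init signature and the matching teardown, so every caller loses one argument. A standalone model of the API after the change (stand-in types, not the driver's):

struct display_mode { int clock; };

struct panel_model {
	struct display_mode *fixed_mode;
	struct display_mode *downclock_mode;
	/* alt_fixed_mode no longer exists */
};

/* New shape of intel_panel_init(): two modes instead of three. */
static int panel_init_model(struct panel_model *panel,
			    struct display_mode *fixed_mode,
			    struct display_mode *downclock_mode)
{
	panel->fixed_mode = fixed_mode;
	panel->downclock_mode = downclock_mode;
	return 0;
}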
......
(7 more files' diffs are collapsed and not shown.)