Commit e2d3c414 authored by Daniel Vetter

Merge tag 'drm-intel-fixes-2019-01-11' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes

i915 fixes for v5.0-rc2:
- Disable PSR for Apple panels
- Broxton ERR_PTR error state fix
- Kabylake VECS workaround fix
- Unwind failure on pinning the gen7 ppgtt
- GVT workload request allocation fix
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/87pnt35z8h.fsf@intel.com
@@ -1273,6 +1273,8 @@ static const struct dpcd_quirk dpcd_quirk_list[] = {
{ OUI(0x00, 0x22, 0xb9), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_CONSTANT_N) },
/* LG LP140WF6-SPM1 eDP panel */
{ OUI(0x00, 0x22, 0xb9), DEVICE_ID('s', 'i', 'v', 'a', 'r', 'T'), false, BIT(DP_DPCD_QUIRK_CONSTANT_N) },
/* Apple panels need some additional handling to support PSR */
{ OUI(0x00, 0x10, 0xfa), DEVICE_ID_ANY, false, BIT(DP_DPCD_QUIRK_NO_PSR) }
};
#undef OUI
......
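For context on the hunk above: each dpcd_quirk entry is matched against the sink's OUI (here 00:10:fa, Apple) and, optionally, its device ID string; the third field distinguishes branch devices from sinks. Drivers then consult the table through drm_dp_has_quirk(). A minimal consumer-side sketch, mirroring the intel_psr_init_dpcd() hunk later in this merge (intel_dp->desc is assumed to have been filled earlier via drm_dp_read_desc()):

        /* Skip PSR on sinks that advertise it but are on the quirk list. */
        if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
                DRM_DEBUG_KMS("PSR support not currently available for this panel\n");
                return;
        }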
@@ -356,6 +356,33 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
return 0;
}
static int
intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
struct i915_gem_context *shadow_ctx = s->shadow_ctx;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
struct i915_request *rq;
int ret = 0;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
if (workload->req)
goto out;
rq = i915_request_alloc(engine, shadow_ctx);
if (IS_ERR(rq)) {
gvt_vgpu_err("fail to allocate gem request\n");
ret = PTR_ERR(rq);
goto out;
}
workload->req = i915_request_get(rq);
out:
return ret;
}
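The helper above is deliberately idempotent: if workload->req is already set it returns 0 immediately, so dispatch_workload() can call it unconditionally before shadowing. Together with the new workload->shadow flag further down, this splits dispatch into three distinct steps. A condensed sketch of the resulting order, taken from the dispatch_workload() hunk below (all three calls run under dev_priv->drm.struct_mutex, as the lockdep asserts require):

        ret = intel_gvt_workload_req_alloc(workload);       /* 1. allocate the request   */
        if (ret)
                goto err_req;

        ret = intel_gvt_scan_and_shadow_workload(workload); /* 2. shadow once, guarded   */
        if (ret)                                            /*    by workload->shadow    */
                goto out;

        ret = populate_shadow_context(workload);            /* 3. fill the shadow ctx    */
        if (ret)
                release_shadow_wa_ctx(&workload->wa_ctx);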
/**
* intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
* shadow it as well, including the ringbuffer, wa_ctx and ctx.
@@ -372,12 +399,11 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
struct intel_context *ce;
struct i915_request *rq;
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
if (workload->req)
if (workload->shadow)
return 0;
ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
@@ -417,22 +443,8 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
goto err_shadow;
}
rq = i915_request_alloc(engine, shadow_ctx);
if (IS_ERR(rq)) {
gvt_vgpu_err("fail to allocate gem request\n");
ret = PTR_ERR(rq);
goto err_shadow;
}
workload->req = i915_request_get(rq);
ret = populate_shadow_context(workload);
if (ret)
goto err_req;
workload->shadow = true;
return 0;
err_req:
rq = fetch_and_zero(&workload->req);
i915_request_put(rq);
err_shadow:
release_shadow_wa_ctx(&workload->wa_ctx);
err_unpin:
@@ -671,23 +683,31 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
mutex_lock(&vgpu->vgpu_lock);
mutex_lock(&dev_priv->drm.struct_mutex);
ret = intel_gvt_workload_req_alloc(workload);
if (ret)
goto err_req;
ret = intel_gvt_scan_and_shadow_workload(workload);
if (ret)
goto out;
ret = prepare_workload(workload);
ret = populate_shadow_context(workload);
if (ret) {
release_shadow_wa_ctx(&workload->wa_ctx);
goto out;
}
ret = prepare_workload(workload);
out:
if (ret)
workload->status = ret;
if (!IS_ERR_OR_NULL(workload->req)) {
gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
ring_id, workload->req);
i915_request_add(workload->req);
workload->dispatched = true;
}
err_req:
if (ret)
workload->status = ret;
mutex_unlock(&dev_priv->drm.struct_mutex);
mutex_unlock(&vgpu->vgpu_lock);
return ret;
......
@@ -83,6 +83,7 @@ struct intel_vgpu_workload {
struct i915_request *req;
/* if this workload has been dispatched to i915? */
bool dispatched;
bool shadow; /* if workload has done shadow of guest request */
int status;
struct intel_vgpu_mm *shadow_mm;
......
@@ -984,8 +984,8 @@ static int i915_gpu_info_open(struct inode *inode, struct file *file)
intel_runtime_pm_get(i915);
gpu = i915_capture_gpu_state(i915);
intel_runtime_pm_put(i915);
if (!gpu)
return -ENOMEM;
if (IS_ERR(gpu))
return PTR_ERR(gpu);
file->private_data = gpu;
return 0;
@@ -1018,7 +1018,13 @@ i915_error_state_write(struct file *filp,
static int i915_error_state_open(struct inode *inode, struct file *file)
{
file->private_data = i915_first_error_state(inode->i_private);
struct i915_gpu_state *error;
error = i915_first_error_state(inode->i_private);
if (IS_ERR(error))
return PTR_ERR(error);
file->private_data = error;
return 0;
}
......
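Both debugfs handlers above follow the same new convention: i915_capture_gpu_state() and i915_first_error_state() return an ERR_PTR sentinel, rather than NULL, once error capture has been disabled, so callers propagate PTR_ERR() instead of guessing -ENOMEM. A minimal caller-side sketch under that contract:

        /* ERR_PTR means capture is disabled (or a prior allocation failed);
         * NULL from i915_first_error_state() means nothing captured yet. */
        struct i915_gpu_state *error = i915_first_error_state(i915);

        if (IS_ERR(error))
                return PTR_ERR(error);          /* e.g. -ENODEV once disabled */
        if (error) {
                /* ... consume the captured state ... */
                i915_gpu_state_put(error);      /* drop the reference we took */
        }
        return 0;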
@@ -2075,6 +2075,7 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
{
struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
int err;
/*
* Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
@@ -2090,9 +2091,17 @@ int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
* allocator works in address space sizes, so it's multiplied by page
* size. We allocate at the top of the GTT to avoid fragmentation.
*/
return i915_vma_pin(ppgtt->vma,
0, GEN6_PD_ALIGN,
PIN_GLOBAL | PIN_HIGH);
err = i915_vma_pin(ppgtt->vma,
0, GEN6_PD_ALIGN,
PIN_GLOBAL | PIN_HIGH);
if (err)
goto unpin;
return 0;
unpin:
ppgtt->pin_count = 0;
return err;
}
void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
......
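The unwind above matters because of what this function does in the lines the diff elides: upstream, gen6_ppgtt_pin() takes an early exit with an optimistic increment, roughly `if (ppgtt->pin_count++) return 0;`, so a failed first pin would otherwise leave pin_count at 1 with no VMA actually pinned, and every later pin attempt would wrongly short-circuit. A hedged reconstruction of the whole pattern (the pin_count lines come from the surrounding upstream code, not from this hunk):

        if (ppgtt->pin_count++)
                return 0;               /* already pinned: just take a reference */

        err = i915_vma_pin(ppgtt->vma, 0, GEN6_PD_ALIGN, PIN_GLOBAL | PIN_HIGH);
        if (err)
                ppgtt->pin_count = 0;   /* undo the optimistic increment above */
        return err;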
@@ -1907,9 +1907,16 @@ i915_capture_gpu_state(struct drm_i915_private *i915)
{
struct i915_gpu_state *error;
/* Check if GPU capture has been disabled */
error = READ_ONCE(i915->gpu_error.first_error);
if (IS_ERR(error))
return error;
error = kzalloc(sizeof(*error), GFP_ATOMIC);
if (!error)
return NULL;
if (!error) {
i915_disable_error_state(i915, -ENOMEM);
return ERR_PTR(-ENOMEM);
}
kref_init(&error->ref);
error->i915 = i915;
@@ -1945,11 +1952,8 @@ void i915_capture_error_state(struct drm_i915_private *i915,
return;
error = i915_capture_gpu_state(i915);
if (!error) {
DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
i915_disable_error_state(i915, -ENOMEM);
if (IS_ERR(error))
return;
}
i915_error_capture_msg(i915, error, engine_mask, error_msg);
DRM_INFO("%s\n", error->error_msg);
@@ -1987,7 +1991,7 @@ i915_first_error_state(struct drm_i915_private *i915)
spin_lock_irq(&i915->gpu_error.lock);
error = i915->gpu_error.first_error;
if (error)
if (!IS_ERR_OR_NULL(error))
i915_gpu_state_get(error);
spin_unlock_irq(&i915->gpu_error.lock);
@@ -2000,10 +2004,11 @@ void i915_reset_error_state(struct drm_i915_private *i915)
spin_lock_irq(&i915->gpu_error.lock);
error = i915->gpu_error.first_error;
i915->gpu_error.first_error = NULL;
if (error != ERR_PTR(-ENODEV)) /* if disabled, always disabled */
i915->gpu_error.first_error = NULL;
spin_unlock_irq(&i915->gpu_error.lock);
if (!IS_ERR(error))
if (!IS_ERR_OR_NULL(error))
i915_gpu_state_put(error);
}
......
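All the IS_ERR checks above work because gpu_error.first_error doubles as a sentinel slot: i915_disable_error_state() parks an ERR_PTR there (-ENOMEM from the allocation-failure path added above; -ENODEV is the "if disabled, always disabled" case preserved by i915_reset_error_state()), and every reader treats an ERR_PTR as "capture is off". The helper itself is called but not shown in this diff; a hedged reconstruction of it, based on the i915 code of this period:

        void i915_disable_error_state(struct drm_i915_private *i915, int err)
        {
                spin_lock_irq(&i915->gpu_error.lock);
                /* Never overwrite a real captured state, only an empty slot. */
                if (!i915->gpu_error.first_error)
                        i915->gpu_error.first_error = ERR_PTR(err);
                spin_unlock_irq(&i915->gpu_error.lock);
        }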
@@ -521,7 +521,9 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
ssize_t ret;
gpu = i915_first_error_state(i915);
if (gpu) {
if (IS_ERR(gpu)) {
ret = PTR_ERR(gpu);
} else if (gpu) {
ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count);
i915_gpu_state_put(gpu);
} else {
......
@@ -2244,6 +2244,8 @@ static int logical_ring_init(struct intel_engine_cs *engine)
if (ret)
return ret;
intel_engine_init_workarounds(engine);
if (HAS_LOGICAL_RING_ELSQ(i915)) {
execlists->submit_reg = i915->regs +
i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine));
@@ -2310,7 +2312,6 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
}
intel_engine_init_whitelist(engine);
intel_engine_init_workarounds(engine);
return 0;
}
......
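These two hunks are the Kabylake VECS workaround fix from the summary: the intel_engine_init_workarounds() call moves out of the render-only logical_render_ring_init() into the shared logical_ring_init(), which every logical ring goes through, so non-render engines such as the KBL VECS ring get their workarounds applied too. The register whitelist stays render-only. In outline (schematic sketch, not verbatim source):

        static int logical_ring_init(struct intel_engine_cs *engine)
        {
                /* ... common setup shared by every engine ... */
                intel_engine_init_workarounds(engine);  /* now covers VECS too */
                /* ... ELSQ/execlists register setup ... */
                return 0;
        }

        int logical_render_ring_init(struct intel_engine_cs *engine)
        {
                /* ... render-specific setup ... */
                intel_engine_init_whitelist(engine);    /* whitelist stays render-only */
                return 0;
        }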
@@ -274,10 +274,16 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
intel_dp->psr_dpcd[0]);
if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
DRM_DEBUG_KMS("PSR support not currently available for this panel\n");
return;
}
if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
return;
}
dev_priv->psr.sink_support = true;
dev_priv->psr.sink_sync_latency =
intel_dp_get_sink_sync_latency(intel_dp);
......
@@ -1365,6 +1365,13 @@ enum drm_dp_quirk {
* to 16 bits. So it will give a constant value (0x8000) for compatibility.
*/
DP_DPCD_QUIRK_CONSTANT_N,
/**
* @DP_DPCD_QUIRK_NO_PSR:
*
* The device does not support PSR even though it reports PSR support, or
* the driver still needs to implement proper handling for such a device.
*/
DP_DPCD_QUIRK_NO_PSR,
};
/**
......