Commit d92116a0 authored by Linus Torvalds

Merge tag 'drm-fixes-for-v4.14-rc6' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "Standard fixes pull for rc6: one regression fix for amdgpu, a bunch of
  nouveau fixes that I'd missed a pull req for from Ben last week, some
  exynos regression fixes, and a few fixes for i915"

* tag 'drm-fixes-for-v4.14-rc6' of git://people.freedesktop.org/~airlied/linux:
  drm/nouveau/fbcon: fix oops without fbdev emulation
  Revert "drm/amdgpu: discard commands of killed processes"
  drm/i915: Use a mask when applying WaProgramL3SqcReg1Default
  drm/i915: Report -EFAULT before pwrite fast path into shmemfs
  drm/i915/cnl: Fix PLL initialization for HDMI.
  drm/i915/cnl: Fix PLL mapping.
  drm/i915: Use bdw_ddi_translations_fdi for Broadwell
  drm/i915: Fix eviction when the GGTT is idle but full
  drm/i915/gvt: Fix GPU hang after reusing vGPU instance across different guest OS
  drm/exynos: Clear drvdata after component unbind
  drm/exynos: Fix potential NULL pointer dereference in suspend/resume paths
  drm/nouveau/kms/nv50: fix oops during DP IRQ handling on non-MST boards
  drm/nouveau/bsp/g92: disable by default
  drm/nouveau/mmu: flush tlbs before deleting page tables
@@ -205,32 +205,17 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
 			   struct amd_sched_entity *entity)
 {
 	struct amd_sched_rq *rq = entity->rq;
-	int r;
 
 	if (!amd_sched_entity_is_initialized(sched, entity))
 		return;
 	/**
 	 * The client will not queue more IBs during this fini, consume existing
-	 * queued IBs or discard them on SIGKILL
+	 * queued IBs
 	 */
-	if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
-		r = -ERESTARTSYS;
-	else
-		r = wait_event_killable(sched->job_scheduled,
-					amd_sched_entity_is_idle(entity));
-	amd_sched_rq_remove_entity(rq, entity);
-	if (r) {
-		struct amd_sched_job *job;
-
-		/* Park the kernel for a moment to make sure it isn't processing
-		 * our enity.
-		 */
-		kthread_park(sched->thread);
-		kthread_unpark(sched->thread);
-		while (kfifo_out(&entity->job_queue, &job, sizeof(job)))
-			sched->ops->free_job(job);
-	}
+	wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));
+
+	amd_sched_rq_remove_entity(rq, entity);
 
 	kfifo_free(&entity->job_queue);
 }
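Note on the revert above: teardown goes back to blocking until the scheduler has consumed every queued IB, instead of detecting SIGKILL and freeing jobs behind the scheduler's back. A minimal sketch of that drain-before-free ordering (illustrative kernel-style C; `mini_entity` and the wake-up source are assumptions, not the amdgpu types):

```c
#include <linux/kfifo.h>
#include <linux/wait.h>

/* Illustrative stand-in for the scheduler entity; the real one is
 * struct amd_sched_entity. The queue is assumed kfifo_alloc()'d. */
struct mini_entity {
	struct kfifo job_queue;
};

static void mini_entity_fini(struct mini_entity *e,
			     wait_queue_head_t *job_scheduled)
{
	/*
	 * wait_event() is uninterruptible: even a task hit by SIGKILL
	 * parks here until the consumer (which wakes job_scheduled after
	 * each job) drains the queue. The cost is a potentially long
	 * wait; the benefit is that no job can be freed while the
	 * scheduler thread may still dereference it.
	 */
	wait_event(*job_scheduled, kfifo_is_empty(&e->job_queue));
	kfifo_free(&e->job_queue);
}
```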
...
@@ -168,11 +168,13 @@ static struct drm_driver exynos_drm_driver = {
 static int exynos_drm_suspend(struct device *dev)
 {
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
-	struct exynos_drm_private *private = drm_dev->dev_private;
+	struct exynos_drm_private *private;
 
 	if (pm_runtime_suspended(dev) || !drm_dev)
 		return 0;
 
+	private = drm_dev->dev_private;
+
 	drm_kms_helper_poll_disable(drm_dev);
 	exynos_drm_fbdev_suspend(drm_dev);
 	private->suspend_state = drm_atomic_helper_suspend(drm_dev);
@@ -188,11 +190,12 @@ static int exynos_drm_suspend(struct device *dev)
 static int exynos_drm_resume(struct device *dev)
 {
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
-	struct exynos_drm_private *private = drm_dev->dev_private;
+	struct exynos_drm_private *private;
 
 	if (pm_runtime_suspended(dev) || !drm_dev)
 		return 0;
 
+	private = drm_dev->dev_private;
 	drm_atomic_helper_resume(drm_dev, private->suspend_state);
 	exynos_drm_fbdev_resume(drm_dev);
 	drm_kms_helper_poll_enable(drm_dev);
@@ -427,6 +430,7 @@ static void exynos_drm_unbind(struct device *dev)
 
 	kfree(drm->dev_private);
 	drm->dev_private = NULL;
+	dev_set_drvdata(dev, NULL);
 
 	drm_dev_unref(drm);
 }
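Both PM callbacks above shared the same latent bug: the initializer dereferenced `drm_dev` before the `!drm_dev` check could run, which becomes a real oops once unbind clears the drvdata (the third hunk). Reduced to a sketch (hypothetical names, plain C):

```c
struct priv { int suspend_state; };
struct dev_ctx { struct priv *private; };

/* Broken: ctx is dereferenced in the initializer, so the NULL check
 * below it can never help. */
static int suspend_broken(struct dev_ctx *ctx)
{
	struct priv *p = ctx->private;	/* crashes here if ctx == NULL */

	if (!ctx)
		return 0;		/* dead code for the NULL case */
	return p->suspend_state;
}

/* Fixed: validate the pointer first, dereference after. */
static int suspend_fixed(struct dev_ctx *ctx)
{
	struct priv *p;

	if (!ctx)
		return 0;

	p = ctx->private;
	return p->suspend_state;
}
```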
...
@@ -308,20 +308,8 @@ static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
 
 static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
 {
-	struct intel_gvt_workload_scheduler *scheduler = &vgpu->gvt->scheduler;
-	int ring_id;
-
 	kfree(vgpu->sched_data);
 	vgpu->sched_data = NULL;
-
-	spin_lock_bh(&scheduler->mmio_context_lock);
-	for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
-		if (scheduler->engine_owner[ring_id] == vgpu) {
-			intel_gvt_switch_mmio(vgpu, NULL, ring_id);
-			scheduler->engine_owner[ring_id] = NULL;
-		}
-	}
-	spin_unlock_bh(&scheduler->mmio_context_lock);
 }
 
 static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
@@ -388,6 +376,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 {
 	struct intel_gvt_workload_scheduler *scheduler =
 		&vgpu->gvt->scheduler;
+	int ring_id;
 
 	gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
 
@@ -401,4 +390,13 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 		scheduler->need_reschedule = true;
 		scheduler->current_vgpu = NULL;
 	}
+
+	spin_lock_bh(&scheduler->mmio_context_lock);
+	for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
+		if (scheduler->engine_owner[ring_id] == vgpu) {
+			intel_gvt_switch_mmio(vgpu, NULL, ring_id);
+			scheduler->engine_owner[ring_id] = NULL;
+		}
+	}
+	spin_unlock_bh(&scheduler->mmio_context_lock);
 }
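The gvt change above moves the per-ring MMIO-context handback from the scheduling policy's destroy hook into `intel_vgpu_stop_schedule()`, so engine ownership returns to the host as soon as a vGPU stops being scheduled; a recycled vGPU instance (e.g. after a guest OS change) can then never inherit stale engine state. The ownership-release pattern, as a sketch with hypothetical names:

```c
#include <linux/spinlock.h>

#define MINI_NUM_ENGINES 4

struct mini_scheduler {
	spinlock_t ctx_lock;			/* also taken from BH context */
	void *engine_owner[MINI_NUM_ENGINES];	/* NULL == host owns it */
};

/* Called when a vGPU stops scheduling, not when it is destroyed, so the
 * window in which hardware still carries the old vGPU's context closes
 * as early as possible. */
static void mini_release_engines(struct mini_scheduler *s, void *vgpu)
{
	int ring;

	spin_lock_bh(&s->ctx_lock);	/* _bh: assumed shared with softirqs */
	for (ring = 0; ring < MINI_NUM_ENGINES; ring++) {
		if (s->engine_owner[ring] == vgpu)
			s->engine_owner[ring] = NULL;	/* hand back to host */
	}
	spin_unlock_bh(&s->ctx_lock);
}
```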
@@ -2657,6 +2657,9 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
 	if (READ_ONCE(obj->mm.pages))
 		return -ENODEV;
 
+	if (obj->mm.madv != I915_MADV_WILLNEED)
+		return -EFAULT;
+
 	/* Before the pages are instantiated the object is treated as being
 	 * in the CPU domain. The pages will be clflushed as required before
 	 * use, and we can freely write into the pages directly. If userspace
...
@@ -33,17 +33,16 @@
 #include "intel_drv.h"
 #include "i915_trace.h"
 
-static bool ggtt_is_idle(struct drm_i915_private *dev_priv)
+static bool ggtt_is_idle(struct drm_i915_private *i915)
 {
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 
-	for_each_engine(engine, dev_priv, id) {
-		struct intel_timeline *tl;
-
-		tl = &ggtt->base.timeline.engine[engine->id];
-		if (i915_gem_active_isset(&tl->last_request))
+	if (i915->gt.active_requests)
+		return false;
+
+	for_each_engine(engine, i915, id) {
+		if (engine->last_retired_context != i915->kernel_context)
 			return false;
 	}
 
@@ -157,7 +156,8 @@ i915_gem_evict_something(struct i915_address_space *vm,
 				    min_size, alignment, cache_level,
 				    start, end, mode);
 
-	/* Retire before we search the active list. Although we have
+	/*
+	 * Retire before we search the active list. Although we have
 	 * reasonable accuracy in our retirement lists, we may have
 	 * a stray pin (preventing eviction) that can only be resolved by
 	 * retiring.
@@ -182,7 +182,8 @@ i915_gem_evict_something(struct i915_address_space *vm,
 		BUG_ON(ret);
 	}
 
-	/* Can we unpin some objects such as idle hw contents,
+	/*
+	 * Can we unpin some objects such as idle hw contents,
 	 * or pending flips? But since only the GGTT has global entries
 	 * such as scanouts, rinbuffers and contexts, we can skip the
 	 * purge when inspecting per-process local address spaces.
@@ -190,19 +191,33 @@ i915_gem_evict_something(struct i915_address_space *vm,
 	if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
 		return -ENOSPC;
 
-	if (ggtt_is_idle(dev_priv)) {
-		/* If we still have pending pageflip completions, drop
-		 * back to userspace to give our workqueues time to
-		 * acquire our locks and unpin the old scanouts.
-		 */
-		return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
-	}
+	/*
+	 * Not everything in the GGTT is tracked via VMA using
+	 * i915_vma_move_to_active(), otherwise we could evict as required
+	 * with minimal stalling. Instead we are forced to idle the GPU and
+	 * explicitly retire outstanding requests which will then remove
+	 * the pinning for active objects such as contexts and ring,
+	 * enabling us to evict them on the next iteration.
+	 *
+	 * To ensure that all user contexts are evictable, we perform
+	 * a switch to the perma-pinned kernel context. This all also gives
+	 * us a termination condition, when the last retired context is
+	 * the kernel's there is no more we can evict.
+	 */
+	if (!ggtt_is_idle(dev_priv)) {
+		ret = ggtt_flush(dev_priv);
+		if (ret)
+			return ret;
 
-	ret = ggtt_flush(dev_priv);
-	if (ret)
-		return ret;
+		goto search_again;
+	}
 
-	goto search_again;
+	/*
+	 * If we still have pending pageflip completions, drop
+	 * back to userspace to give our workqueues time to
+	 * acquire our locks and unpin the old scanouts.
+	 */
+	return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
 
 found:
 	/* drm_mm doesn't allow any other other operations while
...
@@ -6998,6 +6998,7 @@ enum {
  */
 #define L3_GENERAL_PRIO_CREDITS(x)	(((x) >> 1) << 19)
 #define L3_HIGH_PRIO_CREDITS(x)		(((x) >> 1) << 14)
+#define L3_PRIO_CREDITS_MASK		((0x1f << 19) | (0x1f << 14))
 
 #define GEN7_L3CNTLREG1				_MMIO(0xB01C)
 #define  GEN7_WA_FOR_GEN7_L3_CONTROL			0x3C47FF8C
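The new mask exists so the two WaProgramL3SqcReg1Default call sites below can switch from a blind register write to a read-modify-write that clears only the credit fields. The setters store `x/2` in a 5-bit field at bit 19 (general) and bit 14 (high), so `(0x1f << 19) | (0x1f << 14)` covers exactly the bits they can produce. A standalone check of that claim (ordinary C, not kernel code):

```c
#include <assert.h>
#include <stdint.h>

#define L3_GENERAL_PRIO_CREDITS(x)	(((x) >> 1) << 19)
#define L3_HIGH_PRIO_CREDITS(x)		(((x) >> 1) << 14)
#define L3_PRIO_CREDITS_MASK		((0x1f << 19) | (0x1f << 14))

int main(void)
{
	/* Every representable credit value must land inside the mask;
	 * then (reg & ~MASK) | credits is guaranteed to leave all other
	 * bits of the register untouched. */
	for (uint32_t x = 0; x <= 62; x += 2) {
		assert((L3_GENERAL_PRIO_CREDITS(x) & ~L3_PRIO_CREDITS_MASK) == 0);
		assert((L3_HIGH_PRIO_CREDITS(x) & ~L3_PRIO_CREDITS_MASK) == 0);
	}
	return 0;
}
```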
...
@@ -664,8 +664,8 @@ intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv,
 			    int *n_entries)
 {
 	if (IS_BROADWELL(dev_priv)) {
-		*n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi);
-		return hsw_ddi_translations_fdi;
+		*n_entries = ARRAY_SIZE(bdw_ddi_translations_fdi);
+		return bdw_ddi_translations_fdi;
 	} else if (IS_HASWELL(dev_priv)) {
 		*n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi);
 		return hsw_ddi_translations_fdi;
@@ -2102,8 +2102,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
 		 * register writes.
 		 */
 		val = I915_READ(DPCLKA_CFGCR0);
-		val &= ~(DPCLKA_CFGCR0_DDI_CLK_OFF(port) |
-			 DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port));
+		val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
 		I915_WRITE(DPCLKA_CFGCR0, val);
 	} else if (IS_GEN9_BC(dev_priv)) {
 		/* DDI -> PLL mapping */
...
@@ -1996,7 +1996,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
 
 	/* 3. Configure DPLL_CFGCR0 */
 	/* Avoid touch CFGCR1 if HDMI mode is not enabled */
-	if (pll->state.hw_state.cfgcr0 & DPLL_CTRL1_HDMI_MODE(pll->id)) {
+	if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
 		val = pll->state.hw_state.cfgcr1;
 		I915_WRITE(CNL_DPLL_CFGCR1(pll->id), val);
 		/* 4. Reab back to ensure writes completed */
...
@@ -1048,9 +1048,12 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
 	}
 
 	/* WaProgramL3SqcReg1DefaultForPerf:bxt */
-	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
-		I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
-					   L3_HIGH_PRIO_CREDITS(2));
+	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
+		u32 val = I915_READ(GEN8_L3SQCREG1);
+		val &= ~L3_PRIO_CREDITS_MASK;
+		val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
+		I915_WRITE(GEN8_L3SQCREG1, val);
+	}
 
 	/* WaToEnableHwFixForPushConstHWBug:bxt */
 	if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
...
@@ -8245,14 +8245,17 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
 				   int high_prio_credits)
 {
 	u32 misccpctl;
+	u32 val;
 
 	/* WaTempDisableDOPClkGating:bdw */
 	misccpctl = I915_READ(GEN7_MISCCPCTL);
 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
 
-	I915_WRITE(GEN8_L3SQCREG1,
-		   L3_GENERAL_PRIO_CREDITS(general_prio_credits) |
-		   L3_HIGH_PRIO_CREDITS(high_prio_credits));
+	val = I915_READ(GEN8_L3SQCREG1);
+	val &= ~L3_PRIO_CREDITS_MASK;
+	val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits);
+	val |= L3_HIGH_PRIO_CREDITS(high_prio_credits);
+	I915_WRITE(GEN8_L3SQCREG1, val);
 
 	/*
 	 * Wait at least 100 clocks before re-enabling clock gating.
...
@@ -223,7 +223,7 @@ void
 nouveau_fbcon_accel_save_disable(struct drm_device *dev)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	if (drm->fbcon) {
+	if (drm->fbcon && drm->fbcon->helper.fbdev) {
 		drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags;
 		drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
 	}
@@ -233,7 +233,7 @@ void
 nouveau_fbcon_accel_restore(struct drm_device *dev)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	if (drm->fbcon) {
+	if (drm->fbcon && drm->fbcon->helper.fbdev) {
 		drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags;
 	}
 }
@@ -245,6 +245,7 @@ nouveau_fbcon_accel_fini(struct drm_device *dev)
 	struct nouveau_fbdev *fbcon = drm->fbcon;
 	if (fbcon && drm->channel) {
 		console_lock();
-		fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
+		if (fbcon->helper.fbdev)
+			fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
 		console_unlock();
 		nouveau_channel_idle(drm->channel);
...
@@ -3265,11 +3265,14 @@ nv50_mstm = {
 void
 nv50_mstm_service(struct nv50_mstm *mstm)
 {
-	struct drm_dp_aux *aux = mstm->mgr.aux;
+	struct drm_dp_aux *aux = mstm ? mstm->mgr.aux : NULL;
 	bool handled = true;
 	int ret;
 	u8 esi[8] = {};
 
+	if (!aux)
+		return;
+
 	while (handled) {
 		ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
 		if (ret != 8) {
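The guard above covers boards without MST support, where the `nv50_mstm` state was never allocated but the DP IRQ path still calls into the service routine. The defensive shape, as a sketch with hypothetical names:

```c
#include <stddef.h>

struct mini_mstm {
	void *aux;	/* stands in for the struct drm_dp_aux pointer */
};

/* IRQ-path entry point: resolve the optional resource NULL-safely and
 * bail out before the first use, instead of assuming setup happened. */
static void mini_mstm_service(struct mini_mstm *mstm)
{
	void *aux = mstm ? mstm->aux : NULL;

	if (!aux)
		return;	/* non-MST board: nothing to service */

	/* ... read and handle ESI events via aux ... */
}
```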
...
@@ -39,5 +39,5 @@ int
 g84_bsp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine)
 {
 	return nvkm_xtensa_new_(&g84_bsp, device, index,
-				true, 0x103000, pengine);
+				device->chipset != 0x92, 0x103000, pengine);
 }
@@ -241,6 +241,8 @@ nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
 			mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
 		}
 
+		mmu->func->flush(vm);
+
 		nvkm_memory_del(&pgt);
 	}
 }
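The mmu fix above is a one-line ordering change with a classic rationale: flush the TLBs while the page-table memory is still valid, so that neither the hardware walker nor a stale cached translation can reference pages that have already been freed. In outline (hypothetical names, not the nvkm API):

```c
struct mini_pgt;

/* Assumed primitives for the sketch. */
void mini_tlb_flush(void);		  /* invalidate cached translations */
void mini_pgt_free(struct mini_pgt *pgt); /* return the table's memory   */

static void mini_unmap_pgt(struct mini_pgt *pgt)
{
	/* 1. Unlink pgt from its parent directory (not shown). */

	/* 2. Flush while pgt is still valid memory: afterwards the MMU
	 *    holds no cached entry derived from it and will not walk it. */
	mini_tlb_flush();

	/* 3. Only now can the memory safely be handed back; freeing
	 *    before the flush is a use-after-free by the hardware. */
	mini_pgt_free(pgt);
}
```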
...