commit 9c870d03
Author: Chris Wilson <chris@chris-wilson.co.uk>

drm/i915: Use RPM as the barrier for controlling user mmap access

We can remove the false coupling between RPM and struct_mutex by
observing that the RPM wakeref can serve as the barrier around user
mmap access: we tear down the user's PTEs atomically from within rpm
suspend, and faulting in a new PTE requires the rpm wakeref, so no
user access is possible through those PTEs unless RPM is awake. Having
made that observation, we can drop the presumption that rpm must be
taken outside of struct_mutex, and so allow fine-grained acquisition
of a wakeref around hw access rather than having to remember to
acquire the wakeref early on.
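The barrier can be pictured with a minimal sketch (hypothetical
condensed helpers built only from calls that appear in this patch,
not the driver's literal code; the real paths are
intel_runtime_suspend() and i915_gem_fault() below):

	/* Suspend side: atomically revoke every user PTE, so any
	 * later user access must take a fresh fault.
	 */
	static int rpm_suspend_sketch(struct drm_i915_private *dev_priv)
	{
		i915_gem_release_all_mmaps(dev_priv);
		/* ... power the device down ... */
		return 0;
	}

	/* Fault side: a PTE is only reinstated while an RPM wakeref is
	 * held, and runtime suspend revokes every PTE, so user access
	 * always finds the device awake.
	 */
	static int fault_sketch(struct drm_i915_private *dev_priv)
	{
		intel_runtime_pm_get(dev_priv);
		/* ... insert the PTE, track obj on mm.userfault_list ... */
		intel_runtime_pm_put(dev_priv);
		return 0;
	}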

v2: Rejig placement of the new intel_runtime_pm_get() to be as tight
as possible around the GTT pread/pwrite.
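As a sketch of that placement (error paths and the actual copy loop
elided), the wakeref now brackets only the GGTT window access inside
i915_gem_gtt_pread():

	intel_runtime_pm_get(to_i915(dev));
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	/* ... copy out through the GTT window ... */
	i915_vma_unpin(vma);
	intel_runtime_pm_put(to_i915(dev));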
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Daniel Vetter <daniel@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/20161024124218.18252-2-chris@chris-wilson.co.uk
parent 275f039d
@@ -743,17 +743,32 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 			   I915_READ(VLV_IIR_RW));
 		seq_printf(m, "Display IMR:\t%08x\n",
 			   I915_READ(VLV_IMR));
-		for_each_pipe(dev_priv, pipe)
+		for_each_pipe(dev_priv, pipe) {
+			enum intel_display_power_domain power_domain;
+
+			power_domain = POWER_DOMAIN_PIPE(pipe);
+			if (!intel_display_power_get_if_enabled(dev_priv,
+								power_domain)) {
+				seq_printf(m, "Pipe %c power disabled\n",
+					   pipe_name(pipe));
+				continue;
+			}
+
 			seq_printf(m, "Pipe %c stat:\t%08x\n",
 				   pipe_name(pipe),
 				   I915_READ(PIPESTAT(pipe)));
+
+			intel_display_power_put(dev_priv, power_domain);
+		}
+
+		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
 		seq_printf(m, "Port hotplug:\t%08x\n",
 			   I915_READ(PORT_HOTPLUG_EN));
 		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
 			   I915_READ(VLV_DPFLIPSTAT));
 		seq_printf(m, "DPINVGTT:\t%08x\n",
 			   I915_READ(DPINVGTT));
+		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

 		for (i = 0; i < 4; i++) {
 			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
@@ -1396,14 +1411,9 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 static int ironlake_drpc_info(struct seq_file *m)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
 	u32 rgvmodectl, rstdbyctl;
 	u16 crstandvid;
-	int ret;

-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
 	intel_runtime_pm_get(dev_priv);

 	rgvmodectl = I915_READ(MEMMODECTL);
@@ -1411,7 +1421,6 @@ static int ironlake_drpc_info(struct seq_file *m)
 	crstandvid = I915_READ16(CRSTANDVID);

 	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev->struct_mutex);

 	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
 	seq_printf(m, "Boost freq: %d\n",
@@ -1757,6 +1766,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
 	bool sr_enabled = false;

 	intel_runtime_pm_get(dev_priv);
+	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

 	if (HAS_PCH_SPLIT(dev_priv))
 		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
@@ -1770,6 +1780,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

+	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
 	intel_runtime_pm_put(dev_priv);

 	seq_printf(m, "self-refresh: %s\n",
@@ -2091,12 +2102,7 @@ static const char *swizzle_string(unsigned swizzle)
 static int i915_swizzle_info(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
-	int ret;

-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
 	intel_runtime_pm_get(dev_priv);

 	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
@@ -2136,7 +2142,6 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
 		seq_puts(m, "L-shaped memory detected\n");

 	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev->struct_mutex);

 	return 0;
 }
@@ -2542,11 +2547,22 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 		enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
 	else {
 		for_each_pipe(dev_priv, pipe) {
+			enum transcoder cpu_transcoder =
+				intel_pipe_to_cpu_transcoder(dev_priv, pipe);
+			enum intel_display_power_domain power_domain;
+
+			power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+			if (!intel_display_power_get_if_enabled(dev_priv,
+								power_domain))
+				continue;
+
 			stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
 				VLV_EDP_PSR_CURR_STATE_MASK;
 			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
 			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
 				enabled = true;
+
+			intel_display_power_put(dev_priv, power_domain);
 		}
 	}
@@ -3094,6 +3110,8 @@ static int i915_engine_info(struct seq_file *m, void *unused)
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;

+	intel_runtime_pm_get(dev_priv);
+
 	for_each_engine(engine, dev_priv, id) {
 		struct intel_breadcrumbs *b = &engine->breadcrumbs;
 		struct drm_i915_gem_request *rq;
@@ -3213,6 +3231,8 @@ static int i915_engine_info(struct seq_file *m, void *unused)
 		seq_puts(m, "\n");
 	}

+	intel_runtime_pm_put(dev_priv);
+
 	return 0;
 }
@@ -4799,13 +4819,9 @@ i915_wedged_set(void *data, u64 val)
 	if (i915_reset_in_progress(&dev_priv->gpu_error))
 		return -EAGAIN;

-	intel_runtime_pm_get(dev_priv);
-
 	i915_handle_error(dev_priv, val,
 			  "Manually setting wedged to %llu", val);

-	intel_runtime_pm_put(dev_priv);
-
 	return 0;
 }
@@ -5040,22 +5056,16 @@ static int
 i915_cache_sharing_get(void *data, u64 *val)
 {
 	struct drm_i915_private *dev_priv = data;
-	struct drm_device *dev = &dev_priv->drm;
 	u32 snpcr;
-	int ret;

 	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
 		return -ENODEV;

-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
 	intel_runtime_pm_get(dev_priv);

 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

 	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev->struct_mutex);

 	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
...
@@ -2301,24 +2301,6 @@ static int intel_runtime_suspend(struct device *kdev)
 	DRM_DEBUG_KMS("Suspending device\n");

-	/*
-	 * We could deadlock here in case another thread holding struct_mutex
-	 * calls RPM suspend concurrently, since the RPM suspend will wait
-	 * first for this RPM suspend to finish. In this case the concurrent
-	 * RPM resume will be followed by its RPM suspend counterpart. Still
-	 * for consistency return -EAGAIN, which will reschedule this suspend.
-	 */
-	if (!mutex_trylock(&dev->struct_mutex)) {
-		DRM_DEBUG_KMS("device lock contention, deffering suspend\n");
-		/*
-		 * Bump the expiration timestamp, otherwise the suspend won't
-		 * be rescheduled.
-		 */
-		pm_runtime_mark_last_busy(kdev);
-		return -EAGAIN;
-	}
-
 	disable_rpm_wakeref_asserts(dev_priv);

 	/*
@@ -2326,7 +2308,6 @@ static int intel_runtime_suspend(struct device *kdev)
 	 * an RPM reference.
 	 */
 	i915_gem_release_all_mmaps(dev_priv);
-	mutex_unlock(&dev->struct_mutex);

 	intel_guc_suspend(dev);
...
@@ -826,6 +826,7 @@ i915_gem_gtt_pread(struct drm_device *dev,
 	uint64_t offset;
 	int ret;

+	intel_runtime_pm_get(to_i915(dev));
 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
 	if (!IS_ERR(vma)) {
 		node.start = i915_ggtt_offset(vma);
@@ -926,6 +927,7 @@ i915_gem_gtt_pread(struct drm_device *dev,
 		i915_vma_unpin(vma);
 	}
 out:
+	intel_runtime_pm_put(to_i915(dev));
 	return ret;
 }
@@ -1060,12 +1062,9 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 		ret = i915_gem_shmem_pread(dev, obj, args, file);

 	/* pread for non shmem backed objects */
-	if (ret == -EFAULT || ret == -ENODEV) {
-		intel_runtime_pm_get(to_i915(dev));
+	if (ret == -EFAULT || ret == -ENODEV)
 		ret = i915_gem_gtt_pread(dev, obj, args->size,
 					args->offset, args->data_ptr);
-		intel_runtime_pm_put(to_i915(dev));
-	}

 	i915_gem_object_put(obj);
 	mutex_unlock(&dev->struct_mutex);
@@ -1126,6 +1125,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
 	if (i915_gem_object_is_tiled(obj))
 		return -EFAULT;

+	intel_runtime_pm_get(i915);
 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
 				       PIN_MAPPABLE | PIN_NONBLOCK);
 	if (!IS_ERR(vma)) {
@@ -1234,6 +1234,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
 		i915_vma_unpin(vma);
 	}
 out:
+	intel_runtime_pm_put(i915);
 	return ret;
 }
@@ -1466,12 +1467,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	 * perspective, requiring manual detiling by the client.
 	 */
 	if (!i915_gem_object_has_struct_page(obj) ||
-	    cpu_write_needs_clflush(obj)) {
-		ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
+	    cpu_write_needs_clflush(obj))
 		/* Note that the gtt paths might fail with non-page-backed user
 		 * pointers (e.g. gtt mappings when moving data between
-		 * textures). Fallback to the shmem path in that case. */
-	}
+		 * textures). Fallback to the shmem path in that case.
+		 */
+		ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);

 	if (ret == -EFAULT || ret == -ENOSPC) {
 		if (obj->phys_handle)
@@ -1840,6 +1841,7 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
 		goto err_unpin;

 	/* Mark as being mmapped into userspace for later revocation */
+	assert_rpm_wakelock_held(dev_priv);
 	spin_lock(&dev_priv->mm.userfault_lock);
 	if (list_empty(&obj->userfault_link))
 		list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
@@ -1925,8 +1927,13 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
 	/* Serialisation between user GTT access and our code depends upon
 	 * revoking the CPU's PTE whilst the mutex is held. The next user
 	 * pagefault then has to wait until we release the mutex.
+	 *
+	 * Note that RPM complicates somewhat by adding an additional
+	 * requirement that operations to the GGTT be made holding the RPM
+	 * wakeref.
 	 */
 	lockdep_assert_held(&i915->drm.struct_mutex);
+	intel_runtime_pm_get(i915);

 	spin_lock(&i915->mm.userfault_lock);
 	if (!list_empty(&obj->userfault_link)) {
@@ -1935,7 +1942,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
 	}
 	spin_unlock(&i915->mm.userfault_lock);

 	if (!zap)
-		return;
+		goto out;

 	drm_vma_node_unmap(&obj->base.vma_node,
 			   obj->base.dev->anon_inode->i_mapping);
@@ -1948,6 +1955,9 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
 	 * memory writes before touching registers / GSM.
 	 */
 	wmb();
+
+out:
+	intel_runtime_pm_put(i915);
 }

 void
@@ -3476,7 +3486,7 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
 			       struct drm_file *file)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
 	struct drm_i915_gem_caching *args = data;
 	struct drm_i915_gem_object *obj;
 	enum i915_cache_level level;
@@ -3493,23 +3503,21 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
 		 * cacheline, whereas normally such cachelines would get
 		 * invalidated.
 		 */
-		if (!HAS_LLC(dev) && !HAS_SNOOP(dev))
+		if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
 			return -ENODEV;

 		level = I915_CACHE_LLC;
 		break;
 	case I915_CACHING_DISPLAY:
-		level = HAS_WT(dev_priv) ? I915_CACHE_WT : I915_CACHE_NONE;
+		level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
 		break;
 	default:
 		return -EINVAL;
 	}

-	intel_runtime_pm_get(dev_priv);
-
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
-		goto rpm_put;
+		return ret;

 	obj = i915_gem_object_lookup(file, args->handle);
 	if (!obj) {
@@ -3518,13 +3526,9 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
 	}

 	ret = i915_gem_object_set_cache_level(obj, level);
 	i915_gem_object_put(obj);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
-rpm_put:
-	intel_runtime_pm_put(dev_priv);
-
 	return ret;
 }
...
@@ -2667,6 +2667,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 				 enum i915_cache_level cache_level,
 				 u32 flags)
 {
+	struct drm_i915_private *i915 = to_i915(vma->vm->dev);
 	struct drm_i915_gem_object *obj = vma->obj;
 	u32 pte_flags = 0;
 	int ret;
@@ -2679,8 +2680,10 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 	if (obj->gt_ro)
 		pte_flags |= PTE_READ_ONLY;

+	intel_runtime_pm_get(i915);
 	vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
 				cache_level, pte_flags);
+	intel_runtime_pm_put(i915);

 	/*
 	 * Without aliasing PPGTT there's no difference between
@@ -2696,6 +2699,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 				      enum i915_cache_level cache_level,
 				      u32 flags)
 {
+	struct drm_i915_private *i915 = to_i915(vma->vm->dev);
 	u32 pte_flags;
 	int ret;
@@ -2710,14 +2714,15 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 	if (flags & I915_VMA_GLOBAL_BIND) {
+		intel_runtime_pm_get(i915);
 		vma->vm->insert_entries(vma->vm,
 					vma->pages, vma->node.start,
 					cache_level, pte_flags);
+		intel_runtime_pm_put(i915);
 	}

 	if (flags & I915_VMA_LOCAL_BIND) {
-		struct i915_hw_ppgtt *appgtt =
-			to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
+		struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
 		appgtt->base.insert_entries(&appgtt->base,
 					    vma->pages, vma->node.start,
 					    cache_level, pte_flags);
@@ -2728,12 +2733,16 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 static void ggtt_unbind_vma(struct i915_vma *vma)
 {
-	struct i915_hw_ppgtt *appgtt = to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
+	struct drm_i915_private *i915 = to_i915(vma->vm->dev);
+	struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
 	const u64 size = min(vma->size, vma->node.size);

-	if (vma->flags & I915_VMA_GLOBAL_BIND)
+	if (vma->flags & I915_VMA_GLOBAL_BIND) {
+		intel_runtime_pm_get(i915);
 		vma->vm->clear_range(vma->vm,
 				     vma->node.start, size);
+		intel_runtime_pm_put(i915);
+	}

 	if (vma->flags & I915_VMA_LOCAL_BIND && appgtt)
 		appgtt->base.clear_range(&appgtt->base,
...
@@ -205,8 +205,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}

-	intel_runtime_pm_get(dev_priv);
-
 	mutex_lock(&dev->struct_mutex);
 	if (obj->pin_display || obj->framebuffer_references) {
 		err = -EBUSY;
@@ -302,8 +300,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 	i915_gem_object_put(obj);
 	mutex_unlock(&dev->struct_mutex);

-	intel_runtime_pm_put(dev_priv);
-
 	return err;
 }
...