Commit a8198eea authored by Chris Wilson, committed by Keith Packard

drm/i915: Introduce i915_gem_object_finish_gpu()

... reincarnated from i915_gem_object_flush_gpu(). The semantic
difference is that after calling finish_gpu() the object no longer
resides in any GPU domain, and so will cause the GPU caches to be
invalidated if it is ever used again.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Parent 284d9529
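
To make the semantic change described above concrete, here is a small userspace-only toy model (all identifiers are invented stand-ins, not the i915 API): once the finish step drops every GPU bit from the object's read-domain mask, the next GPU use sees a domain miss and must re-invalidate the GPU caches, which is what the hunks below do with I915_GEM_GPU_DOMAINS.

/* Toy userspace model of the domain bookkeeping described above.
 * All names are invented; compile with any C compiler, e.g. `cc toy.c`.
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_DOMAIN_CPU		(1u << 0)
#define TOY_DOMAIN_RENDER	(1u << 1)
#define TOY_DOMAIN_SAMPLER	(1u << 2)
#define TOY_GPU_DOMAINS		(TOY_DOMAIN_RENDER | TOY_DOMAIN_SAMPLER)

struct toy_obj {
	uint32_t read_domains;
	uint32_t write_domain;
};

/* Mimics the renamed helper: afterwards the object sits in no GPU domain. */
static void toy_finish_gpu(struct toy_obj *obj)
{
	obj->read_domains &= ~TOY_GPU_DOMAINS;
}

/* Mimics the decision a later GPU use has to make: an object outside the
 * requested GPU read domain must have the GPU caches invalidated first. */
static int toy_needs_invalidate(const struct toy_obj *obj, uint32_t domain)
{
	return (obj->read_domains & domain) == 0;
}

int main(void)
{
	struct toy_obj obj = { .read_domains = TOY_DOMAIN_RENDER, .write_domain = 0 };

	printf("before finish_gpu: invalidate? %d\n",
	       toy_needs_invalidate(&obj, TOY_DOMAIN_RENDER));	/* prints 0 */
	toy_finish_gpu(&obj);
	printf("after finish_gpu:  invalidate? %d\n",
	       toy_needs_invalidate(&obj, TOY_DOMAIN_RENDER));	/* prints 1 */
	return 0;
}
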
@@ -1190,7 +1190,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
					     uint32_t read_domains,
					     uint32_t write_domain);
-int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 void i915_gem_do_init(struct drm_device *dev,
@@ -2165,23 +2165,29 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 		return -EINVAL;
 	}
 
+	ret = i915_gem_object_finish_gpu(obj);
+	if (ret == -ERESTARTSYS)
+		return ret;
+	/* Continue on if we fail due to EIO, the GPU is hung so we
+	 * should be safe and we need to cleanup or else we might
+	 * cause memory corruption through use-after-free.
+	 */
+
 	/* blow away mappings if mapped through GTT */
 	i915_gem_release_mmap(obj);
 
 	/* Move the object to the CPU domain to ensure that
 	 * any possible CPU writes while it's not in the GTT
-	 * are flushed when we go to remap it. This will
-	 * also ensure that all pending GPU writes are finished
-	 * before we unbind.
+	 * are flushed when we go to remap it.
 	 */
-	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+	if (ret == 0)
+		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
 	if (ret == -ERESTARTSYS)
 		return ret;
-	/* Continue on if we fail due to EIO, the GPU is hung so we
-	 * should be safe and we need to cleanup or else we might
-	 * cause memory corruption through use-after-free.
-	 */
 	if (ret) {
 		/* In the event of a disaster, abandon all caches and
 		 * hope for the best.
 		 */
 		i915_gem_clflush_object(obj);
 		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	}
@@ -3045,11 +3051,11 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
 }
 
 int
-i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj)
+i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
 {
 	int ret;
 
-	if (!obj->active)
+	if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
 		return 0;
 
 	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
@@ -3058,6 +3064,9 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj)
 			return ret;
 	}
 
+	/* Ensure that we invalidate the GPU's caches and TLBs. */
+	obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
+
 	return i915_gem_object_wait_rendering(obj);
 }
@@ -1971,7 +1971,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		 * This should only fail upon a hung GPU, in which case we
 		 * can safely continue.
 		 */
-		ret = i915_gem_object_flush_gpu(obj);
+		ret = i915_gem_object_finish_gpu(obj);
 		(void) ret;
 	}
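
The comment kept in this last hunk ("should only fail upon a hung GPU, in which case we can safely continue") spells out the error policy both call sites follow: -ERESTARTSYS is propagated so the interrupted operation can be restarted, while -EIO from a wedged GPU is treated as non-fatal and cleanup proceeds. Below is a userspace-only sketch of that policy; the names are invented and ERESTARTSYS is defined locally because it is a kernel-internal errno.

/* Toy model of the caller-side error policy seen in the unbind and
 * intel_pipe_set_base hunks above; not kernel code.
 */
#include <errno.h>
#include <stdio.h>

#ifndef ERESTARTSYS
#define ERESTARTSYS 512	/* kernel-internal: "restart the syscall" */
#endif

/* Stand-in for i915_gem_object_finish_gpu(); returns a simulated result. */
static int toy_finish_gpu(int simulated_result)
{
	return simulated_result;
}

/* Propagate a signal-induced restart, but carry on past a hung GPU so the
 * rest of the teardown (unmapping, cache flushing) still happens. */
static int toy_teardown(int simulated_result)
{
	int ret = toy_finish_gpu(simulated_result);

	if (ret == -ERESTARTSYS)
		return ret;	/* let the operation be restarted */

	/* -EIO (GPU hung) or success: continue with cleanup regardless */
	return 0;
}

int main(void)
{
	printf("ok:       %d\n", toy_teardown(0));		/* 0 */
	printf("signal:   %d\n", toy_teardown(-ERESTARTSYS));	/* -512 */
	printf("gpu hang: %d\n", toy_teardown(-EIO));		/* 0 */
	return 0;
}
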