提交 16e809ac 编写于 作者: D Daniel Vetter 提交者: Chris Wilson

drm/i915: unbind unmappable objects on fault/pin

In i915_gem_object_pin, obviously unbind only if mappable is true.

This is the last part to enable gtt_mappable_end != gtt_size, which
the next patch will do.

v2: Fences on g33/pineview only work in the mappable part of the
gtt.
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
上级 920afa77
...@@ -260,6 +260,16 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, ...@@ -260,6 +260,16 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
return 0; return 0;
} }
/*
 * Report whether the object is reachable through the CPU-visible
 * (mappable) part of the GTT: either it is not bound at all, or its
 * entire extent lies below the mappable boundary.
 */
static bool
i915_gem_object_cpu_accessible(struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;

	/* An unbound object can always be (re)bound into the mappable range. */
	if (obj->gtt_space == NULL)
		return true;

	/* Bound: the whole object must end at or below the mappable limit. */
	return obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
}
static inline int static inline int
fast_shmem_read(struct page **pages, fast_shmem_read(struct page **pages,
loff_t page_base, int page_offset, loff_t page_base, int page_offset,
...@@ -1255,6 +1265,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ...@@ -1255,6 +1265,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/* Now bind it into the GTT if needed */ /* Now bind it into the GTT if needed */
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
if (!i915_gem_object_cpu_accessible(obj_priv))
i915_gem_object_unbind(obj);
if (!obj_priv->gtt_space) { if (!obj_priv->gtt_space) {
ret = i915_gem_object_bind_to_gtt(obj, 0, true); ret = i915_gem_object_bind_to_gtt(obj, 0, true);
if (ret) if (ret)
...@@ -3465,11 +3478,15 @@ i915_gem_execbuffer_pin(struct drm_device *dev, ...@@ -3465,11 +3478,15 @@ i915_gem_execbuffer_pin(struct drm_device *dev,
ret = 0; ret = 0;
for (i = 0; i < count; i++) { for (i = 0; i < count; i++) {
struct drm_i915_gem_exec_object2 *entry = &exec_list[i]; struct drm_i915_gem_exec_object2 *entry = &exec_list[i];
struct drm_i915_gem_object *obj= to_intel_bo(object_list[i]); struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
bool need_fence = bool need_fence =
entry->flags & EXEC_OBJECT_NEEDS_FENCE && entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE; obj->tiling_mode != I915_TILING_NONE;
/* g33/pnv can't fence buffers in the unmappable part */
bool need_mappable =
entry->relocation_count ? true : need_fence;
/* Check fence reg constraints and rebind if necessary */ /* Check fence reg constraints and rebind if necessary */
if (need_fence && if (need_fence &&
!i915_gem_object_fence_offset_ok(&obj->base, !i915_gem_object_fence_offset_ok(&obj->base,
...@@ -3480,7 +3497,8 @@ i915_gem_execbuffer_pin(struct drm_device *dev, ...@@ -3480,7 +3497,8 @@ i915_gem_execbuffer_pin(struct drm_device *dev,
} }
ret = i915_gem_object_pin(&obj->base, ret = i915_gem_object_pin(&obj->base,
entry->alignment, true); entry->alignment,
need_mappable);
if (ret) if (ret)
break; break;
...@@ -4064,7 +4082,8 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment, ...@@ -4064,7 +4082,8 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment,
if (obj_priv->gtt_space != NULL) { if (obj_priv->gtt_space != NULL) {
if (alignment == 0) if (alignment == 0)
alignment = i915_gem_get_gtt_alignment(obj); alignment = i915_gem_get_gtt_alignment(obj);
if (obj_priv->gtt_offset & (alignment - 1)) { if (obj_priv->gtt_offset & (alignment - 1) ||
(mappable && !i915_gem_object_cpu_accessible(obj_priv))) {
WARN(obj_priv->pin_count, WARN(obj_priv->pin_count,
"bo is already pinned with incorrect alignment:" "bo is already pinned with incorrect alignment:"
" offset=%x, req.alignment=%x\n", " offset=%x, req.alignment=%x\n",
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册