提交 75e9e915 编写于 作者: D Daniel Vetter 提交者: Chris Wilson

drm/i915: kill mappable/fenceable distinction

a00b10c3 "Only enforce fence limits inside the GTT" also
added a fenceable/mappable distinction when binding/pinning buffers.
This only complicates the code with no practical gain:

- In execbuffer this matters only for g33/pineview, as this is the only
  chip that needs fences and has an unmappable gtt area. But fences
  are only possible in the mappable part of the gtt, so need_fence
  implies need_mappable. And need_mappable is only set independently
  with relocations which implies (for sane userspace) that the buffer
  is untiled.

- The overlay code is only really used on i8xx, which doesn't have
  unmappable gtt. And it doesn't support tiled buffers, currently.

- For all other buffers it's a bug to pass in a tiled bo.

In short, this distinction doesn't have any practical gain.

I've also reverted mapping the overlay and context pages as possibly
unmappable. It's not worth being overly clever here, all the big
gains from unmappable are for execbuf bos.

Also add a comment for a clever optimization that confused me
while reading the original patch by Chris Wilson.
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
上级 818f2a3c
...@@ -752,8 +752,6 @@ struct drm_i915_gem_object { ...@@ -752,8 +752,6 @@ struct drm_i915_gem_object {
* Advice: are the backing pages purgeable? * Advice: are the backing pages purgeable?
*/ */
unsigned int madv : 2; unsigned int madv : 2;
unsigned int fenceable : 1;
unsigned int mappable : 1;
/** /**
* Current tiling mode for the object. * Current tiling mode for the object.
...@@ -772,6 +770,12 @@ struct drm_i915_gem_object { ...@@ -772,6 +770,12 @@ struct drm_i915_gem_object {
unsigned int pin_count : 4; unsigned int pin_count : 4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
/**
* Is the object at the current location in the gtt mappable and
* fenceable? Used to avoid costly recalculations.
*/
unsigned int map_and_fenceable : 1;
/** /**
* Whether the current gtt mapping needs to be mappable (and isn't just * Whether the current gtt mapping needs to be mappable (and isn't just
* mappable by accident). Track pin and fault separate for a more * mappable by accident). Track pin and fault separate for a more
...@@ -1013,7 +1017,7 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, ...@@ -1013,7 +1017,7 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
size_t size); size_t size);
void i915_gem_free_object(struct drm_gem_object *obj); void i915_gem_free_object(struct drm_gem_object *obj);
int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment, int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment,
bool mappable, bool need_fence); bool map_and_fenceable);
void i915_gem_object_unpin(struct drm_gem_object *obj); void i915_gem_object_unpin(struct drm_gem_object *obj);
int i915_gem_object_unbind(struct drm_gem_object *obj); int i915_gem_object_unbind(struct drm_gem_object *obj);
void i915_gem_release_mmap(struct drm_gem_object *obj); void i915_gem_release_mmap(struct drm_gem_object *obj);
......
...@@ -59,8 +59,7 @@ static int i915_gem_object_wait_rendering(struct drm_gem_object *obj, ...@@ -59,8 +59,7 @@ static int i915_gem_object_wait_rendering(struct drm_gem_object *obj,
bool interruptible); bool interruptible);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
unsigned alignment, unsigned alignment,
bool mappable, bool map_and_fenceable);
bool need_fence);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pwrite *args, struct drm_i915_gem_pwrite *args,
...@@ -1074,7 +1073,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, ...@@ -1074,7 +1073,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
else if (obj_priv->tiling_mode == I915_TILING_NONE && else if (obj_priv->tiling_mode == I915_TILING_NONE &&
obj_priv->gtt_space && obj_priv->gtt_space &&
obj->write_domain != I915_GEM_DOMAIN_CPU) { obj->write_domain != I915_GEM_DOMAIN_CPU) {
ret = i915_gem_object_pin(obj, 0, true, false); ret = i915_gem_object_pin(obj, 0, true);
if (ret) if (ret)
goto out; goto out;
...@@ -1300,8 +1299,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ...@@ -1300,8 +1299,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
BUG_ON(obj_priv->pin_count && !obj_priv->pin_mappable); BUG_ON(obj_priv->pin_count && !obj_priv->pin_mappable);
if (obj_priv->gtt_space) { if (obj_priv->gtt_space) {
if (!obj_priv->mappable || if (!obj_priv->map_and_fenceable) {
(obj_priv->tiling_mode && !obj_priv->fenceable)) {
ret = i915_gem_object_unbind(obj); ret = i915_gem_object_unbind(obj);
if (ret) if (ret)
goto unlock; goto unlock;
...@@ -1309,8 +1307,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ...@@ -1309,8 +1307,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
} }
if (!obj_priv->gtt_space) { if (!obj_priv->gtt_space) {
ret = i915_gem_object_bind_to_gtt(obj, 0, ret = i915_gem_object_bind_to_gtt(obj, 0, true);
true, obj_priv->tiling_mode);
if (ret) if (ret)
goto unlock; goto unlock;
} }
...@@ -2273,8 +2270,8 @@ i915_gem_object_unbind(struct drm_gem_object *obj) ...@@ -2273,8 +2270,8 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
i915_gem_info_remove_gtt(dev_priv, obj_priv); i915_gem_info_remove_gtt(dev_priv, obj_priv);
list_del_init(&obj_priv->mm_list); list_del_init(&obj_priv->mm_list);
obj_priv->fenceable = true; /* Avoid an unnecessary call to unbind on rebind. */
obj_priv->mappable = true; obj_priv->map_and_fenceable = true;
drm_mm_put_block(obj_priv->gtt_space); drm_mm_put_block(obj_priv->gtt_space);
obj_priv->gtt_space = NULL; obj_priv->gtt_space = NULL;
...@@ -2383,7 +2380,7 @@ static void i915_write_fence_reg(struct drm_gem_object *obj) ...@@ -2383,7 +2380,7 @@ static void i915_write_fence_reg(struct drm_gem_object *obj)
if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) || if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
(obj_priv->gtt_offset & (size - 1))) { (obj_priv->gtt_offset & (size - 1))) {
WARN(1, "%s: object 0x%08x [fenceable? %d] not 1M or size (0x%08x) aligned [gtt_space offset=%lx, size=%lx]\n", WARN(1, "%s: object 0x%08x [fenceable? %d] not 1M or size (0x%08x) aligned [gtt_space offset=%lx, size=%lx]\n",
__func__, obj_priv->gtt_offset, obj_priv->fenceable, size, __func__, obj_priv->gtt_offset, obj_priv->map_and_fenceable, size,
obj_priv->gtt_space->start, obj_priv->gtt_space->size); obj_priv->gtt_space->start, obj_priv->gtt_space->size);
return; return;
} }
...@@ -2687,8 +2684,7 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj, ...@@ -2687,8 +2684,7 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
static int static int
i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
unsigned alignment, unsigned alignment,
bool mappable, bool map_and_fenceable)
bool need_fence)
{ {
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
...@@ -2696,6 +2692,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, ...@@ -2696,6 +2692,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
struct drm_mm_node *free_space; struct drm_mm_node *free_space;
gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
u32 size, fence_size, fence_alignment; u32 size, fence_size, fence_alignment;
bool mappable, fenceable;
int ret; int ret;
if (obj_priv->madv != I915_MADV_WILLNEED) { if (obj_priv->madv != I915_MADV_WILLNEED) {
...@@ -2707,25 +2704,25 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, ...@@ -2707,25 +2704,25 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
fence_alignment = i915_gem_get_gtt_alignment(obj_priv); fence_alignment = i915_gem_get_gtt_alignment(obj_priv);
if (alignment == 0) if (alignment == 0)
alignment = need_fence ? fence_alignment : 4096; alignment = map_and_fenceable ? fence_alignment : 4096;
if (need_fence && alignment & (fence_alignment - 1)) { if (map_and_fenceable && alignment & (fence_alignment - 1)) {
DRM_ERROR("Invalid object alignment requested %u\n", alignment); DRM_ERROR("Invalid object alignment requested %u\n", alignment);
return -EINVAL; return -EINVAL;
} }
size = need_fence ? fence_size : obj->size; size = map_and_fenceable ? fence_size : obj->size;
/* If the object is bigger than the entire aperture, reject it early /* If the object is bigger than the entire aperture, reject it early
* before evicting everything in a vain attempt to find space. * before evicting everything in a vain attempt to find space.
*/ */
if (obj->size > if (obj->size >
(mappable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) { (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
DRM_ERROR("Attempting to bind an object larger than the aperture\n"); DRM_ERROR("Attempting to bind an object larger than the aperture\n");
return -E2BIG; return -E2BIG;
} }
search_free: search_free:
if (mappable) if (map_and_fenceable)
free_space = free_space =
drm_mm_search_free_in_range(&dev_priv->mm.gtt_space, drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
size, alignment, 0, size, alignment, 0,
...@@ -2736,7 +2733,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, ...@@ -2736,7 +2733,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
size, alignment, 0); size, alignment, 0);
if (free_space != NULL) { if (free_space != NULL) {
if (mappable) if (map_and_fenceable)
obj_priv->gtt_space = obj_priv->gtt_space =
drm_mm_get_block_range_generic(free_space, drm_mm_get_block_range_generic(free_space,
size, alignment, 0, size, alignment, 0,
...@@ -2750,7 +2747,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, ...@@ -2750,7 +2747,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
/* If the gtt is empty and we're still having trouble /* If the gtt is empty and we're still having trouble
* fitting our object in, we're out of memory. * fitting our object in, we're out of memory.
*/ */
ret = i915_gem_evict_something(dev, size, alignment, mappable); ret = i915_gem_evict_something(dev, size, alignment,
map_and_fenceable);
if (ret) if (ret)
return ret; return ret;
...@@ -2765,7 +2763,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, ...@@ -2765,7 +2763,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
if (ret == -ENOMEM) { if (ret == -ENOMEM) {
/* first try to clear up some space from the GTT */ /* first try to clear up some space from the GTT */
ret = i915_gem_evict_something(dev, size, ret = i915_gem_evict_something(dev, size,
alignment, mappable); alignment,
map_and_fenceable);
if (ret) { if (ret) {
/* now try to shrink everyone else */ /* now try to shrink everyone else */
if (gfpmask) { if (gfpmask) {
...@@ -2796,7 +2795,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, ...@@ -2796,7 +2795,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
obj_priv->gtt_space = NULL; obj_priv->gtt_space = NULL;
ret = i915_gem_evict_something(dev, size, ret = i915_gem_evict_something(dev, size,
alignment, mappable); alignment, map_and_fenceable);
if (ret) if (ret)
return ret; return ret;
...@@ -2816,15 +2815,17 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, ...@@ -2816,15 +2815,17 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
trace_i915_gem_object_bind(obj, obj_priv->gtt_offset, mappable); trace_i915_gem_object_bind(obj, obj_priv->gtt_offset, map_and_fenceable);
obj_priv->fenceable = fenceable =
obj_priv->gtt_space->size == fence_size && obj_priv->gtt_space->size == fence_size &&
(obj_priv->gtt_space->start & (fence_alignment -1)) == 0; (obj_priv->gtt_space->start & (fence_alignment -1)) == 0;
obj_priv->mappable = mappable =
obj_priv->gtt_offset + obj->size <= dev_priv->mm.gtt_mappable_end; obj_priv->gtt_offset + obj->size <= dev_priv->mm.gtt_mappable_end;
obj_priv->map_and_fenceable = mappable && fenceable;
return 0; return 0;
} }
...@@ -3538,8 +3539,7 @@ i915_gem_execbuffer_pin(struct drm_device *dev, ...@@ -3538,8 +3539,7 @@ i915_gem_execbuffer_pin(struct drm_device *dev,
entry->relocation_count ? true : need_fence; entry->relocation_count ? true : need_fence;
/* Check fence reg constraints and rebind if necessary */ /* Check fence reg constraints and rebind if necessary */
if ((need_fence && !obj->fenceable) || if (need_mappable && !obj->map_and_fenceable) {
(need_mappable && !obj->mappable)) {
ret = i915_gem_object_unbind(&obj->base); ret = i915_gem_object_unbind(&obj->base);
if (ret) if (ret)
break; break;
...@@ -3547,8 +3547,7 @@ i915_gem_execbuffer_pin(struct drm_device *dev, ...@@ -3547,8 +3547,7 @@ i915_gem_execbuffer_pin(struct drm_device *dev,
ret = i915_gem_object_pin(&obj->base, ret = i915_gem_object_pin(&obj->base,
entry->alignment, entry->alignment,
need_mappable, need_mappable);
need_fence);
if (ret) if (ret)
break; break;
...@@ -4143,7 +4142,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, ...@@ -4143,7 +4142,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
int int
i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment, i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment,
bool mappable, bool need_fence) bool map_and_fenceable)
{ {
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
...@@ -4151,19 +4150,19 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment, ...@@ -4151,19 +4150,19 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment,
int ret; int ret;
BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT); BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
BUG_ON(need_fence && !mappable); BUG_ON(map_and_fenceable && !map_and_fenceable);
WARN_ON(i915_verify_lists(dev)); WARN_ON(i915_verify_lists(dev));
if (obj_priv->gtt_space != NULL) { if (obj_priv->gtt_space != NULL) {
if ((alignment && obj_priv->gtt_offset & (alignment - 1)) || if ((alignment && obj_priv->gtt_offset & (alignment - 1)) ||
(need_fence && !obj_priv->fenceable) || (map_and_fenceable && !obj_priv->map_and_fenceable)) {
(mappable && !obj_priv->mappable)) {
WARN(obj_priv->pin_count, WARN(obj_priv->pin_count,
"bo is already pinned with incorrect alignment:" "bo is already pinned with incorrect alignment:"
" offset=%x, req.alignment=%x, need_fence=%d, fenceable=%d, mappable=%d, cpu_accessible=%d\n", " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
" obj->map_and_fenceable=%d\n",
obj_priv->gtt_offset, alignment, obj_priv->gtt_offset, alignment,
need_fence, obj_priv->fenceable, map_and_fenceable,
mappable, obj_priv->mappable); obj_priv->map_and_fenceable);
ret = i915_gem_object_unbind(obj); ret = i915_gem_object_unbind(obj);
if (ret) if (ret)
return ret; return ret;
...@@ -4172,18 +4171,18 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment, ...@@ -4172,18 +4171,18 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment,
if (obj_priv->gtt_space == NULL) { if (obj_priv->gtt_space == NULL) {
ret = i915_gem_object_bind_to_gtt(obj, alignment, ret = i915_gem_object_bind_to_gtt(obj, alignment,
mappable, need_fence); map_and_fenceable);
if (ret) if (ret)
return ret; return ret;
} }
if (obj_priv->pin_count++ == 0) { if (obj_priv->pin_count++ == 0) {
i915_gem_info_add_pin(dev_priv, obj_priv, mappable); i915_gem_info_add_pin(dev_priv, obj_priv, map_and_fenceable);
if (!obj_priv->active) if (!obj_priv->active)
list_move_tail(&obj_priv->mm_list, list_move_tail(&obj_priv->mm_list,
&dev_priv->mm.pinned_list); &dev_priv->mm.pinned_list);
} }
BUG_ON(!obj_priv->pin_mappable && mappable); BUG_ON(!obj_priv->pin_mappable && map_and_fenceable);
WARN_ON(i915_verify_lists(dev)); WARN_ON(i915_verify_lists(dev));
return 0; return 0;
...@@ -4245,8 +4244,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, ...@@ -4245,8 +4244,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
obj_priv->user_pin_count++; obj_priv->user_pin_count++;
obj_priv->pin_filp = file_priv; obj_priv->pin_filp = file_priv;
if (obj_priv->user_pin_count == 1) { if (obj_priv->user_pin_count == 1) {
ret = i915_gem_object_pin(obj, args->alignment, ret = i915_gem_object_pin(obj, args->alignment, true);
true, obj_priv->tiling_mode);
if (ret) if (ret)
goto out; goto out;
} }
...@@ -4439,8 +4437,8 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, ...@@ -4439,8 +4437,8 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
INIT_LIST_HEAD(&obj->ring_list); INIT_LIST_HEAD(&obj->ring_list);
INIT_LIST_HEAD(&obj->gpu_write_list); INIT_LIST_HEAD(&obj->gpu_write_list);
obj->madv = I915_MADV_WILLNEED; obj->madv = I915_MADV_WILLNEED;
obj->fenceable = true; /* Avoid an unnecessary call to unbind on the first bind. */
obj->mappable = true; obj->map_and_fenceable = true;
return &obj->base; return &obj->base;
} }
...@@ -4560,7 +4558,7 @@ i915_gem_init_pipe_control(struct drm_device *dev) ...@@ -4560,7 +4558,7 @@ i915_gem_init_pipe_control(struct drm_device *dev)
obj_priv = to_intel_bo(obj); obj_priv = to_intel_bo(obj);
obj_priv->agp_type = AGP_USER_CACHED_MEMORY; obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
ret = i915_gem_object_pin(obj, 4096, true, false); ret = i915_gem_object_pin(obj, 4096, true);
if (ret) if (ret)
goto err_unref; goto err_unref;
......
...@@ -1461,8 +1461,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, ...@@ -1461,8 +1461,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
BUG(); BUG();
} }
ret = i915_gem_object_pin(obj, alignment, true, ret = i915_gem_object_pin(obj, alignment, true);
obj_priv->tiling_mode);
if (ret) if (ret)
return ret; return ret;
...@@ -4367,7 +4366,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, ...@@ -4367,7 +4366,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
/* we only need to pin inside GTT if cursor is non-phy */ /* we only need to pin inside GTT if cursor is non-phy */
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
if (!dev_priv->info->cursor_needs_physical) { if (!dev_priv->info->cursor_needs_physical) {
ret = i915_gem_object_pin(bo, PAGE_SIZE, true, false); ret = i915_gem_object_pin(bo, PAGE_SIZE, true);
if (ret) { if (ret) {
DRM_ERROR("failed to pin cursor bo\n"); DRM_ERROR("failed to pin cursor bo\n");
goto fail_locked; goto fail_locked;
...@@ -5531,7 +5530,7 @@ intel_alloc_context_page(struct drm_device *dev) ...@@ -5531,7 +5530,7 @@ intel_alloc_context_page(struct drm_device *dev)
} }
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
ret = i915_gem_object_pin(ctx, 4096, false, false); ret = i915_gem_object_pin(ctx, 4096, true);
if (ret) { if (ret) {
DRM_ERROR("failed to pin power context: %d\n", ret); DRM_ERROR("failed to pin power context: %d\n", ret);
goto err_unref; goto err_unref;
......
...@@ -781,7 +781,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, ...@@ -781,7 +781,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (ret != 0) if (ret != 0)
return ret; return ret;
ret = i915_gem_object_pin(new_bo, PAGE_SIZE, false, false); ret = i915_gem_object_pin(new_bo, PAGE_SIZE, true);
if (ret != 0) if (ret != 0)
return ret; return ret;
...@@ -1425,7 +1425,7 @@ void intel_setup_overlay(struct drm_device *dev) ...@@ -1425,7 +1425,7 @@ void intel_setup_overlay(struct drm_device *dev)
} }
overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr; overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr;
} else { } else {
ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true, false); ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true);
if (ret) { if (ret) {
DRM_ERROR("failed to pin overlay register bo\n"); DRM_ERROR("failed to pin overlay register bo\n");
goto out_free_bo; goto out_free_bo;
......
...@@ -547,7 +547,7 @@ static int init_status_page(struct intel_ring_buffer *ring) ...@@ -547,7 +547,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
obj_priv = to_intel_bo(obj); obj_priv = to_intel_bo(obj);
obj_priv->agp_type = AGP_USER_CACHED_MEMORY; obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
ret = i915_gem_object_pin(obj, 4096, true, false); ret = i915_gem_object_pin(obj, 4096, true);
if (ret != 0) { if (ret != 0) {
goto err_unref; goto err_unref;
} }
...@@ -602,7 +602,7 @@ int intel_init_ring_buffer(struct drm_device *dev, ...@@ -602,7 +602,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
ring->gem_object = obj; ring->gem_object = obj;
ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false); ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
if (ret) if (ret)
goto err_unref; goto err_unref;
...@@ -906,7 +906,7 @@ static int blt_ring_init(struct intel_ring_buffer *ring) ...@@ -906,7 +906,7 @@ static int blt_ring_init(struct intel_ring_buffer *ring)
if (obj == NULL) if (obj == NULL)
return -ENOMEM; return -ENOMEM;
ret = i915_gem_object_pin(&obj->base, 4096, true, false); ret = i915_gem_object_pin(&obj->base, 4096, true);
if (ret) { if (ret) {
drm_gem_object_unreference(&obj->base); drm_gem_object_unreference(&obj->base);
return ret; return ret;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册