commit 9d80841e authored by Chris Wilson

drm/i915: Allow ringbuffers to be bound anywhere

Now that we have WC vmapping available, we can bind our rings anywhere
in the GGTT and do not need to restrict them to the mappable region.
Except for stolen objects, for which direct access is verboten, so we
must use the mappable aperture.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160818161718.27187-17-chris@chris-wilson.co.uk
parent 05a20d09
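
For context, the decision this patch encodes in intel_ring_pin() boils down to: pick a write-back (WB) CPU mapping on LLC platforms and a write-combined (WC) mapping otherwise, and only force the ring into the mappable aperture when it is backed by stolen memory. The following user-space sketch is illustrative only; the ring_desc struct, map_type enum, and describe_pin() helper are hypothetical stand-ins for the real i915 types, not kernel code.

/*
 * Hypothetical user-space model of the mapping decision this patch
 * introduces in intel_ring_pin(); the types and names below are
 * simplified stand-ins, not the real i915 structures.
 */
#include <stdbool.h>
#include <stdio.h>

enum map_type { MAP_WB, MAP_WC };	/* models enum i915_map_type */

struct ring_desc {
	bool has_llc;	/* CPU and GPU share the last-level cache */
	bool stolen;	/* ring buffer is backed by stolen memory */
};

/* Mirrors the choice made when pinning a ring after this change. */
static void describe_pin(const struct ring_desc *r)
{
	enum map_type map = r->has_llc ? MAP_WB : MAP_WC;
	bool need_mappable = r->stolen;	/* stolen: CPU access only via aperture */

	printf("map=%s placement=%s\n",
	       map == MAP_WB ? "WB" : "WC",
	       need_mappable ? "mappable aperture (iomap)" : "anywhere in GGTT");
}

int main(void)
{
	const struct ring_desc llc_ring    = { .has_llc = true,  .stolen = false };
	const struct ring_desc stolen_ring = { .has_llc = false, .stolen = true  };

	describe_pin(&llc_ring);	/* WB vmap, no placement restriction */
	describe_pin(&stolen_ring);	/* must stay in the mappable region */
	return 0;
}

As the diff below shows, the cached needs_iomap flag goes away entirely; at pin and unpin time the driver instead asks i915_vma_is_map_and_fenceable() whether the ring ended up in the mappable region.
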
@@ -1892,17 +1892,20 @@ int intel_ring_pin(struct intel_ring *ring)
 {
 	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
 	unsigned int flags = PIN_GLOBAL | PIN_OFFSET_BIAS | 4096;
+	enum i915_map_type map;
 	struct i915_vma *vma = ring->vma;
 	void *addr;
 	int ret;
 
 	GEM_BUG_ON(ring->vaddr);
 
-	if (ring->needs_iomap)
+	map = HAS_LLC(ring->engine->i915) ? I915_MAP_WB : I915_MAP_WC;
+
+	if (vma->obj->stolen)
 		flags |= PIN_MAPPABLE;
 
 	if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
-		if (flags & PIN_MAPPABLE)
+		if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
 			ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
 		else
 			ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
@@ -1914,10 +1917,10 @@ int intel_ring_pin(struct intel_ring *ring)
 	if (unlikely(ret))
 		return ret;
 
-	if (flags & PIN_MAPPABLE)
+	if (i915_vma_is_map_and_fenceable(vma))
 		addr = (void __force *)i915_vma_pin_iomap(vma);
 	else
-		addr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+		addr = i915_gem_object_pin_map(vma->obj, map);
 	if (IS_ERR(addr))
 		goto err;
@@ -1934,7 +1937,7 @@ void intel_ring_unpin(struct intel_ring *ring)
 	GEM_BUG_ON(!ring->vma);
 	GEM_BUG_ON(!ring->vaddr);
 
-	if (ring->needs_iomap)
+	if (i915_vma_is_map_and_fenceable(ring->vma))
 		i915_vma_unpin_iomap(ring->vma);
 	else
 		i915_gem_object_unpin_map(ring->vma->obj);
@@ -2005,8 +2008,6 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
 		return ERR_CAST(vma);
 	}
 	ring->vma = vma;
-	if (!HAS_LLC(engine->i915) || vma->obj->stolen)
-		ring->needs_iomap = true;
 
 	list_add(&ring->link, &engine->buffers);
 	return ring;
......
@@ -96,7 +96,6 @@ struct intel_ring {
 	int space;
 	int size;
 	int effective_size;
-	bool needs_iomap;
 
 	/** We track the position of the requests in the ring buffer, and
 	 * when each is retired we increment last_retired_head as the GPU
......