Commit cc889e0f authored by Daniel Vetter

drm/i915: disable flushing_list/gpu_write_list

This is just the minimal patch to disable all this code so that we can
do decent amounts of QA before we rip it all out.

The complicating thing is that we need to flush the gpu caches after the
batchbuffer is emitted, which is past the point of no return where
execbuffer can't fail any more (otherwise we'd risk submitting the same
batch multiple times).

Hence we need to add a flag to track whether any caches associated
with that ring are dirty. And emit the flush in add_request if that's
the case.
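
In essence: execbuffer marks the ring's caches dirty once the batch is
emitted, and add_request emits the deferred flush before writing the
breadcrumb, so the flush can never be lost. Below is a minimal, self-contained
userspace sketch of that hand-off -- not the driver code; struct ring,
flush_ring(), retire_commands() and add_request() are simplified stand-ins
for the real i915 functions and data structures:

/*
 * Toy userspace model of the deferred-flush scheme described above --
 * NOT the driver code. The names are simplified stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

struct ring {
        bool gpu_caches_dirty;  /* mirrors ring->gpu_caches_dirty */
        unsigned int seqno;
};

/* Stand-in for i915_gem_flush_ring(); always succeeds here. */
static int flush_ring(struct ring *ring)
{
        printf("flushing gpu caches (ring %p)\n", (void *)ring);
        return 0;
}

/*
 * Past the point of no return: the batch is already emitted, so all we
 * may do is note that the caches need flushing before the next request.
 */
static void retire_commands(struct ring *ring)
{
        ring->gpu_caches_dirty = true;
}

/* Emit the deferred flush, if any, before the request breadcrumb. */
static int add_request(struct ring *ring)
{
        if (ring->gpu_caches_dirty) {
                int ret = flush_ring(ring);
                if (ret)
                        return ret;
                ring->gpu_caches_dirty = false;
        }
        printf("emitting breadcrumb, seqno %u\n", ++ring->seqno);
        return 0;
}

int main(void)
{
        struct ring ring = { .gpu_caches_dirty = false, .seqno = 0 };

        retire_commands(&ring);         /* batch emitted, caches now dirty */
        return add_request(&ring);      /* flush happens here, then breadcrumb */
}

The real patch does exactly this with ring->gpu_caches_dirty and
i915_gem_flush_ring(), as the hunks below show.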

Note that this has quite a few behaviour changes:
- Caches get flushed/invalidated unconditionally.
- Invalidation now happens after potential inter-ring sync.

I've bantered around a bit with Chris on irc whether this fixes
anything, and it might or might not. The only thing clear is that with
these changes it's much easier to reason about correctness.

Also rip out a lone get_next_request_seqno in the execbuffer
retire_commands function. I've dug around and couldn't figure out why
it is still there; with the outstanding lazy request tracking it
shouldn't be necessary.

v2: Chris Wilson complained that I also invalidate the read caches
when flushing after a batchbuffer. Now optimized.
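
Concretely, the two remaining flush calls are now asymmetric; both
appear verbatim in the diff below:

/* Before the batch, in i915_gem_execbuffer_move_to_gpu(): invalidate only. */
ret = i915_gem_flush_ring(ring, I915_GEM_GPU_DOMAINS, 0);

/* After the batch, deferred to i915_add_request(): flush only. */
ret = i915_gem_flush_ring(ring, 0, I915_GEM_GPU_DOMAINS);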

v3: Added some comments to explain the new flushing behaviour.

Cc: Eric Anholt <eric@anholt.net>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Parent 8e88a2bd
@@ -1568,6 +1568,21 @@ i915_add_request(struct intel_ring_buffer *ring,
 	int was_empty;
 	int ret;
 
+	/*
+	 * Emit any outstanding flushes - execbuf can fail to emit the flush
+	 * after having emitted the batchbuffer command. Hence we need to fix
+	 * things up similar to emitting the lazy request. The difference here
+	 * is that the flush _must_ happen before the next request, no matter
+	 * what.
+	 */
+	if (ring->gpu_caches_dirty) {
+		ret = i915_gem_flush_ring(ring, 0, I915_GEM_GPU_DOMAINS);
+		if (ret)
+			return ret;
+
+		ring->gpu_caches_dirty = false;
+	}
+
 	BUG_ON(request == NULL);
 	seqno = i915_gem_next_request_seqno(ring);
 
@@ -1613,6 +1628,9 @@ i915_add_request(struct intel_ring_buffer *ring,
 			queue_delayed_work(dev_priv->wq,
 					   &dev_priv->mm.retire_work, HZ);
 	}
+
+	WARN_ON(!list_empty(&ring->gpu_write_list));
+
 	return 0;
 }
@@ -1827,14 +1845,11 @@ i915_gem_retire_work_handler(struct work_struct *work)
 	 */
 	idle = true;
 	for_each_ring(ring, dev_priv, i) {
-		if (!list_empty(&ring->gpu_write_list)) {
+		if (ring->gpu_caches_dirty) {
 			struct drm_i915_gem_request *request;
-			int ret;
 
-			ret = i915_gem_flush_ring(ring,
-						  0, I915_GEM_GPU_DOMAINS);
 			request = kzalloc(sizeof(*request), GFP_KERNEL);
-			if (ret || request == NULL ||
+			if (request == NULL ||
 			    i915_add_request(ring, NULL, request))
 				kfree(request);
 		}
@@ -810,33 +810,16 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 	return ret;
 }
 
-static int
+static void
 i915_gem_execbuffer_flush(struct drm_device *dev,
 			  uint32_t invalidate_domains,
-			  uint32_t flush_domains,
-			  uint32_t flush_rings)
+			  uint32_t flush_domains)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	int i, ret;
-
 	if (flush_domains & I915_GEM_DOMAIN_CPU)
 		intel_gtt_chipset_flush();
 
 	if (flush_domains & I915_GEM_DOMAIN_GTT)
 		wmb();
-
-	if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
-		for (i = 0; i < I915_NUM_RINGS; i++)
-			if (flush_rings & (1 << i)) {
-				ret = i915_gem_flush_ring(&dev_priv->ring[i],
-							  invalidate_domains,
-							  flush_domains);
-				if (ret)
-					return ret;
-			}
-	}
-
-	return 0;
 }
 
 static int
@@ -885,12 +868,9 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 		i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
 
 	if (cd.invalidate_domains | cd.flush_domains) {
-		ret = i915_gem_execbuffer_flush(ring->dev,
-						cd.invalidate_domains,
-						cd.flush_domains,
-						cd.flush_rings);
-		if (ret)
-			return ret;
+		i915_gem_execbuffer_flush(ring->dev,
+					  cd.invalidate_domains,
+					  cd.flush_domains);
 	}
 
 	if (cd.flips) {
@@ -905,6 +885,11 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 			return ret;
 	}
 
+	/* Unconditionally invalidate gpu caches. */
+	ret = i915_gem_flush_ring(ring, I915_GEM_GPU_DOMAINS, 0);
+	if (ret)
+		return ret;
+
 	return 0;
 }
@@ -983,26 +968,13 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
 				    struct intel_ring_buffer *ring)
 {
 	struct drm_i915_gem_request *request;
-	u32 invalidate;
 
-	/*
-	 * Ensure that the commands in the batch buffer are
-	 * finished before the interrupt fires.
-	 *
-	 * The sampler always gets flushed on i965 (sigh).
-	 */
-	invalidate = I915_GEM_DOMAIN_COMMAND;
-	if (INTEL_INFO(dev)->gen >= 4)
-		invalidate |= I915_GEM_DOMAIN_SAMPLER;
-	if (ring->flush(ring, invalidate, 0)) {
-		i915_gem_next_request_seqno(ring);
-		return;
-	}
+	/* Unconditionally force add_request to emit a full flush. */
+	ring->gpu_caches_dirty = true;
 
 	/* Add a breadcrumb for the completion of the batch buffer */
 	request = kzalloc(sizeof(*request), GFP_KERNEL);
 	if (request == NULL || i915_add_request(ring, file, request)) {
-		i915_gem_next_request_seqno(ring);
 		kfree(request);
 	}
 }
@@ -113,6 +113,7 @@ struct intel_ring_buffer {
 	 * Do we have some not yet emitted requests outstanding?
 	 */
 	u32 outstanding_lazy_request;
+	bool gpu_caches_dirty;
 
 	wait_queue_head_t irq_queue;