Commit ca9c46c5 authored by Ville Syrjälä, committed by Daniel Vetter

drm/i915: Kill i915_gem_execbuffer_wait_for_flips()

As per Chris Wilson's suggestion, make
i915_gem_execbuffer_wait_for_flips() go away.

This was used to stall the GPU ring while there were pending
page flips involving the relevant BO, i.e. while the BO was still
being scanned out by the display controller.

The recommended alternative is to use the page flip events to
wait for the page flips to fully complete before reusing the BO
of the old front buffer. Or use more buffers.
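
For illustration, here is a minimal userspace sketch of that event-based
pattern (not part of this commit; it assumes a libdrm modeset setup where
fd, crtc_id and fb_id are hypothetical caller-provided handles, and error
handling is trimmed):

/*
 * Request a page flip with an event, then wait for the completion
 * event before touching the old front buffer again.
 */
#include <poll.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int flip_done;

static void page_flip_handler(int fd, unsigned int frame,
			      unsigned int sec, unsigned int usec,
			      void *data)
{
	/* The old front buffer is no longer being scanned out. */
	flip_done = 1;
}

static void flip_and_wait(int fd, uint32_t crtc_id, uint32_t fb_id)
{
	drmEventContext evctx = {
		.version = DRM_EVENT_CONTEXT_VERSION,
		.page_flip_handler = page_flip_handler,
	};
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	flip_done = 0;

	/* Ask the kernel to emit a completion event for this flip. */
	drmModePageFlip(fd, crtc_id, fb_id, DRM_MODE_PAGE_FLIP_EVENT, NULL);

	/* Only after the event fires may the old buffer be reused. */
	while (!flip_done) {
		poll(&pfd, 1, -1);
		drmHandleEvent(fd, &evctx);
	}
}

With two or more buffers the client never has to block here at all,
since it can keep rendering to a buffer that is not on the scanout
while the flip completes.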
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Kristian Høgsberg <krh@bitplanet.net>
Acked-by: Jesse Barnes <jbarnes@virtuousgeek.org>
[danvet: don't remove obj->pending_flips, still required due to
reordered patches.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Parent f930ddd0
@@ -601,45 +601,12 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 	return ret;
 }
 
-static int
-i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
-{
-	u32 plane, flip_mask;
-	int ret;
-
-	/* Check for any pending flips. As we only maintain a flip queue depth
-	 * of 1, we can simply insert a WAIT for the next display flip prior
-	 * to executing the batch and avoid stalling the CPU.
-	 */
-
-	for (plane = 0; flips >> plane; plane++) {
-		if (((flips >> plane) & 1) == 0)
-			continue;
-
-		if (plane)
-			flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
-		else
-			flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-
-		ret = intel_ring_begin(ring, 2);
-		if (ret)
-			return ret;
-
-		intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_advance(ring);
-	}
-
-	return 0;
-}
-
 static int
 i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 				struct list_head *objects)
 {
 	struct drm_i915_gem_object *obj;
 	uint32_t flush_domains = 0;
-	uint32_t flips = 0;
 	int ret;
 
 	list_for_each_entry(obj, objects, exec_list) {
@@ -650,18 +617,9 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
 			i915_gem_clflush_object(obj);
 
-		if (obj->base.pending_write_domain)
-			flips |= atomic_read(&obj->pending_flip);
-
 		flush_domains |= obj->base.write_domain;
 	}
 
-	if (flips) {
-		ret = i915_gem_execbuffer_wait_for_flips(ring, flips);
-		if (ret)
-			return ret;
-	}
-
 	if (flush_domains & I915_GEM_DOMAIN_CPU)
 		i915_gem_chipset_flush(ring->dev);
@@ -6945,8 +6945,6 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
 
 	obj = work->old_fb_obj;
 
-	atomic_clear_mask(1 << intel_crtc->plane,
-			  &obj->pending_flip.counter);
 	wake_up(&dev_priv->pending_flip_queue);
 
 	queue_work(dev_priv->wq, &work->work);
@@ -7292,10 +7290,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 	work->enable_stall_check = true;
 
-	/* Block clients from rendering to the new back buffer until
-	 * the flip occurs and the object is no longer visible.
-	 */
-	atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
 	atomic_inc(&intel_crtc->unpin_work_count);
 
 	ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
@@ -7312,7 +7306,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 cleanup_pending:
 	atomic_dec(&intel_crtc->unpin_work_count);
-	atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
 	drm_gem_object_unreference(&work->old_fb_obj->base);
 	drm_gem_object_unreference(&obj->base);
 	mutex_unlock(&dev->struct_mutex);