Commit ce453d81 authored by Chris Wilson

drm/i915: Use a device flag for non-interruptible phases

The code paths for modesetting are growing in complexity as we may need
to move the buffers around in order to fit the scanout in the aperture.
Therefore we face a choice as to whether to thread the interruptible status
through the entire pinning and unbinding code paths or to add a flag to
the device when we may not be interrupted by a signal. This does the
latter and so fixes a few instances of modesetting failures under stress.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Parent 8408c282
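
In miniature, the approach is: rather than thread an `interruptible` parameter through every pinning and unbinding call chain, a caller that must not be aborted by a signal clears a per-device flag around its critical section, and the low-level wait consults that flag itself. A minimal standalone sketch of the pattern (the struct and wait helpers below are simplified stand-ins, not the driver's actual code; the real flag is `dev_priv->mm.interruptible`, serialised by `struct_mutex`):

```c
#include <stdbool.h>

/* Stand-ins for the two wait flavours; bodies are placeholders. */
static int  wait_interruptible(void)   { return 0; /* or -ERESTARTSYS */ }
static void wait_uninterruptible(void) { }

/* Simplified stand-in for drm_i915_private. */
struct dev_state {
        bool interruptible;
};

/* Low-level wait: reads the device flag instead of taking a
 * bool interruptible argument from every caller. */
static int wait_for_gpu(struct dev_state *dev)
{
        if (dev->interruptible)
                return wait_interruptible();
        wait_uninterruptible();
        return 0;
}

/* A modesetting-style caller: clear the flag for the whole phase
 * and restore it before returning. */
static int modeset_phase(struct dev_state *dev)
{
        int ret;

        dev->interruptible = false;
        ret = wait_for_gpu(dev);        /* nested waits now ignore signals */
        dev->interruptible = true;
        return ret;
}
```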
@@ -616,6 +616,12 @@ typedef struct drm_i915_private {
          */
         struct delayed_work retire_work;
 
+        /**
+         * Are we in a non-interruptible section of code like
+         * modesetting?
+         */
+        bool interruptible;
+
         /**
          * Flag if the X Server, and thus DRM, is not currently in
          * control of the device.
@@ -1110,8 +1116,7 @@ void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
-int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
-                                                bool interruptible);
+int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
 void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
                                     struct intel_ring_buffer *ring,
                                     u32 seqno);
@@ -1133,8 +1138,7 @@ i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
 }
 
 int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
-                                           struct intel_ring_buffer *pipelined,
-                                           bool interruptible);
+                                           struct intel_ring_buffer *pipelined);
 int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
 
 void i915_gem_retire_requests(struct drm_device *dev);
@@ -1143,8 +1147,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
                                             uint32_t read_domains,
                                             uint32_t write_domain);
-int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
-                                           bool interruptible);
+int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 void i915_gem_do_init(struct drm_device *dev,
@@ -1157,8 +1160,7 @@ int __must_check i915_add_request(struct intel_ring_buffer *ring,
                                   struct drm_file *file,
                                   struct drm_i915_gem_request *request);
 int __must_check i915_wait_request(struct intel_ring_buffer *ring,
-                                   uint32_t seqno,
-                                   bool interruptible);
+                                   uint32_t seqno);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int __must_check
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
@@ -1200,7 +1200,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         if (obj->tiling_mode == I915_TILING_NONE)
                 ret = i915_gem_object_put_fence(obj);
         else
-                ret = i915_gem_object_get_fence(obj, NULL, true);
+                ret = i915_gem_object_get_fence(obj, NULL);
         if (ret)
                 goto unlock;
@@ -1989,8 +1989,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
  */
 int
 i915_wait_request(struct intel_ring_buffer *ring,
-                  uint32_t seqno,
-                  bool interruptible)
+                  uint32_t seqno)
 {
         drm_i915_private_t *dev_priv = ring->dev->dev_private;
         u32 ier;
@@ -2043,7 +2042,7 @@ i915_wait_request(struct intel_ring_buffer *ring,
         ring->waiting_seqno = seqno;
         if (ring->irq_get(ring)) {
-                if (interruptible)
+                if (dev_priv->mm.interruptible)
                         ret = wait_event_interruptible(ring->irq_queue,
                                                        i915_seqno_passed(ring->get_seqno(ring), seqno)
                                                        || atomic_read(&dev_priv->mm.wedged));
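
For reference, this is the distinction the flag selects between: `wait_event_interruptible()` aborts and returns `-ERESTARTSYS` when a signal is delivered to the waiting task, while plain `wait_event()` sleeps until its condition becomes true regardless of signals. A sketch of the dispatch shape (the helper name and `done()` condition are invented for illustration):

```c
#include <linux/wait.h>

/* Hypothetical helper mirroring the shape of i915_wait_request()
 * above: one flag picks the wait flavour at the lowest level. */
static int wait_for_done(wait_queue_head_t *q, bool interruptible,
                         bool (*done)(void))
{
        if (interruptible)
                /* Fails with -ERESTARTSYS if a signal arrives first. */
                return wait_event_interruptible(*q, done());

        /* Sleeps until done() is true; cannot be aborted by a signal. */
        wait_event(*q, done());
        return 0;
}
```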
@@ -2085,8 +2084,7 @@ i915_wait_request(struct intel_ring_buffer *ring,
  * safe to unbind from the GTT or access from the CPU.
  */
 int
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
-                               bool interruptible)
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
 {
         int ret;
@@ -2099,9 +2097,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
          * it.
          */
         if (obj->active) {
-                ret = i915_wait_request(obj->ring,
-                                        obj->last_rendering_seqno,
-                                        interruptible);
+                ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
                 if (ret)
                         return ret;
         }
@@ -2202,9 +2198,7 @@ static int i915_ring_idle(struct intel_ring_buffer *ring)
                         return ret;
         }
 
-        return i915_wait_request(ring,
-                                 i915_gem_next_request_seqno(ring),
-                                 true);
+        return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
 }
 
 int
@@ -2405,8 +2399,7 @@ static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
 
 static int
 i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
-                            struct intel_ring_buffer *pipelined,
-                            bool interruptible)
+                            struct intel_ring_buffer *pipelined)
 {
         int ret;
@@ -2425,9 +2418,7 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
         if (!ring_passed_seqno(obj->last_fenced_ring,
                                obj->last_fenced_seqno)) {
                 ret = i915_wait_request(obj->last_fenced_ring,
-                                        obj->last_fenced_seqno,
-                                        interruptible);
+                                        obj->last_fenced_seqno);
                 if (ret)
                         return ret;
         }
@@ -2453,7 +2444,7 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
         if (obj->tiling_mode)
                 i915_gem_release_mmap(obj);
 
-        ret = i915_gem_object_flush_fence(obj, NULL, true);
+        ret = i915_gem_object_flush_fence(obj, NULL);
         if (ret)
                 return ret;
@@ -2530,8 +2521,7 @@ i915_find_fence_reg(struct drm_device *dev,
  */
 int
 i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
-                          struct intel_ring_buffer *pipelined,
-                          bool interruptible)
+                          struct intel_ring_buffer *pipelined)
 {
         struct drm_device *dev = obj->base.dev;
         struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2554,8 +2544,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
                 if (!ring_passed_seqno(obj->last_fenced_ring,
                                        reg->setup_seqno)) {
                         ret = i915_wait_request(obj->last_fenced_ring,
-                                                reg->setup_seqno,
-                                                interruptible);
+                                                reg->setup_seqno);
                         if (ret)
                                 return ret;
                 }
@@ -2564,9 +2553,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
                 }
         } else if (obj->last_fenced_ring &&
                    obj->last_fenced_ring != pipelined) {
-                ret = i915_gem_object_flush_fence(obj,
-                                                  pipelined,
-                                                  interruptible);
+                ret = i915_gem_object_flush_fence(obj, pipelined);
                 if (ret)
                         return ret;
         } else if (obj->tiling_changed) {
@@ -2603,7 +2590,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
         if (reg == NULL)
                 return -ENOSPC;
 
-        ret = i915_gem_object_flush_fence(obj, pipelined, interruptible);
+        ret = i915_gem_object_flush_fence(obj, pipelined);
         if (ret)
                 return ret;
@@ -2615,9 +2602,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
                 if (old->tiling_mode)
                         i915_gem_release_mmap(old);
 
-                ret = i915_gem_object_flush_fence(old,
-                                                  pipelined,
-                                                  interruptible);
+                ret = i915_gem_object_flush_fence(old, pipelined);
                 if (ret) {
                         drm_gem_object_unreference(&old->base);
                         return ret;
@@ -2940,7 +2925,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
                 return ret;
 
         if (obj->pending_gpu_write || write) {
-                ret = i915_gem_object_wait_rendering(obj, true);
+                ret = i915_gem_object_wait_rendering(obj);
                 if (ret)
                         return ret;
         }
@@ -2990,7 +2975,7 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
         /* Currently, we are always called from an non-interruptible context. */
         if (pipelined != obj->ring) {
-                ret = i915_gem_object_wait_rendering(obj, false);
+                ret = i915_gem_object_wait_rendering(obj);
                 if (ret)
                         return ret;
         }
@@ -3008,8 +2993,7 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
 }
 
 int
-i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
-                          bool interruptible)
+i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj)
 {
         int ret;
@@ -3022,7 +3006,7 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
                         return ret;
         }
 
-        return i915_gem_object_wait_rendering(obj, interruptible);
+        return i915_gem_object_wait_rendering(obj);
 }
 
 /**
@@ -3044,7 +3028,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
         if (ret)
                 return ret;
 
-        ret = i915_gem_object_wait_rendering(obj, true);
+        ret = i915_gem_object_wait_rendering(obj);
         if (ret)
                 return ret;
@@ -3142,7 +3126,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
         if (ret)
                 return ret;
 
-        ret = i915_gem_object_wait_rendering(obj, true);
+        ret = i915_gem_object_wait_rendering(obj);
         if (ret)
                 return ret;
@@ -3842,6 +3826,8 @@ i915_gem_load(struct drm_device *dev)
         i915_gem_detect_bit_6_swizzle(dev);
         init_waitqueue_head(&dev_priv->pending_flip_queue);
 
+        dev_priv->mm.interruptible = true;
+
         dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
         dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
         register_shrinker(&dev_priv->mm.inactive_shrinker);
@@ -560,7 +560,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
                 if (has_fenced_gpu_access) {
                         if (need_fence) {
-                                ret = i915_gem_object_get_fence(obj, ring, 1);
+                                ret = i915_gem_object_get_fence(obj, ring);
                                 if (ret)
                                         break;
                         } else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
@@ -756,7 +756,7 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
         /* XXX gpu semaphores are currently causing hard hangs on SNB mobile */
         if (INTEL_INFO(obj->base.dev)->gen < 6 || IS_MOBILE(obj->base.dev))
-                return i915_gem_object_wait_rendering(obj, true);
+                return i915_gem_object_wait_rendering(obj);
 
         idx = intel_ring_sync_index(from, to);
@@ -2067,6 +2067,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
                            struct drm_i915_gem_object *obj,
                            struct intel_ring_buffer *pipelined)
 {
+        struct drm_i915_private *dev_priv = dev->dev_private;
         u32 alignment;
         int ret;
@@ -2091,9 +2092,10 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
                 BUG();
         }
 
+        dev_priv->mm.interruptible = false;
         ret = i915_gem_object_pin(obj, alignment, true);
         if (ret)
-                return ret;
+                goto err_interruptible;
 
         ret = i915_gem_object_set_to_display_plane(obj, pipelined);
         if (ret)
@@ -2105,15 +2107,18 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
          * a fence as the cost is not that onerous.
          */
         if (obj->tiling_mode != I915_TILING_NONE) {
-                ret = i915_gem_object_get_fence(obj, pipelined, false);
+                ret = i915_gem_object_get_fence(obj, pipelined);
                 if (ret)
                         goto err_unpin;
         }
 
+        dev_priv->mm.interruptible = true;
         return 0;
 
 err_unpin:
         i915_gem_object_unpin(obj);
+err_interruptible:
+        dev_priv->mm.interruptible = true;
         return ret;
 }
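
The hunk above shows the one real cost of device-global state: the flag must be restored on every exit path, hence the new `err_interruptible` label ordered after `err_unpin`. The unwind skeleton, reduced to placeholders (all names and types here are invented, not the driver's code):

```c
#include <stdbool.h>

struct buffer;                                  /* placeholder type */
struct dev_state { bool interruptible; };       /* stand-in for dev_priv->mm */

static int  pin(struct buffer *b)   { (void)b; return 0; }      /* may fail */
static void unpin(struct buffer *b) { (void)b; }
static int  fence(struct buffer *b) { (void)b; return 0; }      /* may fail */

static int pin_and_fence(struct dev_state *dev, struct buffer *buf)
{
        int ret;

        dev->interruptible = false;

        ret = pin(buf);
        if (ret)
                goto err_interruptible;         /* nothing to undo but the flag */

        ret = fence(buf);
        if (ret)
                goto err_unpin;                 /* undo the pin, then the flag */

        dev->interruptible = true;              /* success path restores it too */
        return 0;

err_unpin:
        unpin(buf);
err_interruptible:
        dev->interruptible = true;
        return ret;
}
```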
@@ -2247,7 +2252,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
                  * This should only fail upon a hung GPU, in which case we
                  * can safely continue.
                  */
-                ret = i915_gem_object_flush_gpu(obj, false);
+                ret = i915_gem_object_flush_gpu(obj);
                 (void) ret;
         }
@@ -2994,9 +2999,12 @@ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
 {
         if (!enable && intel_crtc->overlay) {
                 struct drm_device *dev = intel_crtc->base.dev;
+                struct drm_i915_private *dev_priv = dev->dev_private;
 
                 mutex_lock(&dev->struct_mutex);
-                (void) intel_overlay_switch_off(intel_crtc->overlay, false);
+                dev_priv->mm.interruptible = false;
+                (void) intel_overlay_switch_off(intel_crtc->overlay);
+                dev_priv->mm.interruptible = true;
                 mutex_unlock(&dev->struct_mutex);
         }
@@ -329,8 +329,7 @@ extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
 
 extern void intel_setup_overlay(struct drm_device *dev);
 extern void intel_cleanup_overlay(struct drm_device *dev);
-extern int intel_overlay_switch_off(struct intel_overlay *overlay,
-                                    bool interruptible);
+extern int intel_overlay_switch_off(struct intel_overlay *overlay);
 extern int intel_overlay_put_image(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv);
 extern int intel_overlay_attrs(struct drm_device *dev, void *data,
@@ -213,7 +213,6 @@ static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
 
 static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
                                          struct drm_i915_gem_request *request,
-                                         bool interruptible,
                                          void (*tail)(struct intel_overlay *))
 {
         struct drm_device *dev = overlay->dev;
@@ -228,8 +227,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
         }
         overlay->last_flip_req = request->seqno;
         overlay->flip_tail = tail;
-        ret = i915_wait_request(LP_RING(dev_priv),
-                                overlay->last_flip_req, true);
+        ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
         if (ret)
                 return ret;
@@ -321,7 +319,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
         OUT_RING(MI_NOOP);
         ADVANCE_LP_RING();
 
-        ret = intel_overlay_do_wait_request(overlay, request, true, NULL);
+        ret = intel_overlay_do_wait_request(overlay, request, NULL);
 out:
         if (pipe_a_quirk)
                 i830_deactivate_pipe_a(dev);
@@ -400,8 +398,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
 }
 
 /* overlay needs to be disabled in OCMD reg */
-static int intel_overlay_off(struct intel_overlay *overlay,
-                             bool interruptible)
+static int intel_overlay_off(struct intel_overlay *overlay)
 {
         struct drm_device *dev = overlay->dev;
         struct drm_i915_private *dev_priv = dev->dev_private;
@@ -436,14 +433,13 @@
         OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
         ADVANCE_LP_RING();
 
-        return intel_overlay_do_wait_request(overlay, request, interruptible,
+        return intel_overlay_do_wait_request(overlay, request,
                                              intel_overlay_off_tail);
 }
 
 /* recover from an interruption due to a signal
  * We have to be careful not to repeat work forever an make forward progess. */
-static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
-                                                bool interruptible)
+static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
 {
         struct drm_device *dev = overlay->dev;
         drm_i915_private_t *dev_priv = dev->dev_private;
@@ -452,8 +448,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
         if (overlay->last_flip_req == 0)
                 return 0;
 
-        ret = i915_wait_request(LP_RING(dev_priv),
-                                overlay->last_flip_req, interruptible);
+        ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
         if (ret)
                 return ret;
@@ -498,7 +493,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
         OUT_RING(MI_NOOP);
         ADVANCE_LP_RING();
 
-        ret = intel_overlay_do_wait_request(overlay, request, true,
+        ret = intel_overlay_do_wait_request(overlay, request,
                                             intel_overlay_release_old_vid_tail);
         if (ret)
                 return ret;
@@ -867,8 +862,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
         return ret;
 }
 
-int intel_overlay_switch_off(struct intel_overlay *overlay,
-                             bool interruptible)
+int intel_overlay_switch_off(struct intel_overlay *overlay)
 {
         struct overlay_registers *regs;
         struct drm_device *dev = overlay->dev;
@@ -877,7 +871,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay,
         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
         BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
 
-        ret = intel_overlay_recover_from_interrupt(overlay, interruptible);
+        ret = intel_overlay_recover_from_interrupt(overlay);
         if (ret != 0)
                 return ret;
@@ -892,7 +886,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay,
         regs->OCMD = 0;
         intel_overlay_unmap_regs(overlay, regs);
 
-        ret = intel_overlay_off(overlay, interruptible);
+        ret = intel_overlay_off(overlay);
         if (ret != 0)
                 return ret;
@@ -1134,7 +1128,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
                 mutex_lock(&dev->mode_config.mutex);
                 mutex_lock(&dev->struct_mutex);
 
-                ret = intel_overlay_switch_off(overlay, true);
+                ret = intel_overlay_switch_off(overlay);
 
                 mutex_unlock(&dev->struct_mutex);
                 mutex_unlock(&dev->mode_config.mutex);
@@ -1170,13 +1164,13 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
                 goto out_unlock;
         }
 
-        ret = intel_overlay_recover_from_interrupt(overlay, true);
+        ret = intel_overlay_recover_from_interrupt(overlay);
         if (ret != 0)
                 goto out_unlock;
 
         if (overlay->crtc != crtc) {
                 struct drm_display_mode *mode = &crtc->base.mode;
-                ret = intel_overlay_switch_off(overlay, true);
+                ret = intel_overlay_switch_off(overlay);
                 if (ret != 0)
                         goto out_unlock;