Commit afa8ce5b authored by Daniel Vetter

drm/i915: Nuke legacy flip queueing code

Just a very minimal patch to nuke that code. Lots of the flip
interrupt handling stuff is still around.

Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20170719125502.25696-2-daniel.vetter@ffwll.ch
Parent 29012159
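For context: with the driver-private queue_flip path gone, the legacy page-flip ioctl is expected to be serviced by DRM's atomic helpers instead, wired up elsewhere in this series. A hedged sketch, assuming stock helper usage rather than quoting i915's actual hookup:

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>

/* Sketch only: drm_atomic_helper_page_flip() translates a legacy
 * page-flip request into an atomic commit on the CRTC's primary
 * plane, so no per-GPU flip-queueing vfunc is needed anymore.
 * "example_crtc_funcs" is a hypothetical name, not i915's table.
 */
static const struct drm_crtc_funcs example_crtc_funcs = {
	.page_flip = drm_atomic_helper_page_flip,
	/* ...remaining hooks unchanged... */
};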
@@ -715,11 +715,6 @@ struct drm_i915_display_funcs {
void (*fdi_link_train)(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state);
void (*init_clock_gating)(struct drm_i915_private *dev_priv);
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_request *req,
uint32_t flags);
void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
/* clock updates for mode set */
/* cursor updates */
...
@@ -2664,20 +2664,6 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
return false;
}
/* Update plane->state->fb to match plane->fb after driver-internal updates */
static void
update_state_fb(struct drm_plane *plane)
{
if (plane->fb == plane->state->fb)
return;
if (plane->state->fb)
drm_framebuffer_unreference(plane->state->fb);
plane->state->fb = plane->fb;
if (plane->state->fb)
drm_framebuffer_reference(plane->state->fb);
}
static void
intel_set_plane_visible(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state,
@@ -10159,35 +10145,6 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
kfree(intel_crtc);
}
static void intel_unpin_work_fn(struct work_struct *__work)
{
struct intel_flip_work *work =
container_of(__work, struct intel_flip_work, unpin_work);
struct intel_crtc *crtc = to_intel_crtc(work->crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_plane *primary = crtc->base.primary;
if (is_mmio_work(work))
flush_work(&work->mmio_work);
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_vma(work->old_vma);
i915_gem_object_put(work->pending_flip_obj);
mutex_unlock(&dev->struct_mutex);
i915_gem_request_put(work->flip_queued_req);
intel_frontbuffer_flip_complete(to_i915(dev),
to_intel_plane(primary)->frontbuffer_bit);
intel_fbc_post_update(crtc);
drm_framebuffer_unreference(work->old_fb);
BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
atomic_dec(&crtc->unpin_work_count);
kfree(work);
}
/* Is 'a' after or equal to 'b'? */
static bool g4x_flip_count_after_eq(u32 a, u32 b)
{
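(The body of g4x_flip_count_after_eq is elided by the diff context. For reference, a hedged sketch of such a wrap-safe comparison — hypothetical here, not taken from this patch — relies on unsigned subtraction and the sign bit of the difference:)

static bool flip_count_after_eq(u32 a, u32 b)
{
	/* Unsigned subtraction wraps modulo 2^32; the top bit of the
	 * difference is clear exactly when 'a' is at or ahead of 'b'
	 * within half the counter range, so the test stays correct
	 * across hardware flip-counter wraparound.
	 */
	return !((a - b) & 0x80000000);
}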
@@ -10332,346 +10289,6 @@ static inline void intel_mark_page_flip_active(struct intel_crtc *crtc,
atomic_set(&work->pending, 1);
}
static int intel_gen2_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_request *req,
uint32_t flags)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 flip_mask, *cs;
cs = intel_ring_begin(req, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
/* Can't queue multiple flips, so wait for the previous
* one to finish before executing the next.
*/
if (intel_crtc->plane)
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
*cs++ = MI_WAIT_FOR_EVENT | flip_mask;
*cs++ = MI_NOOP;
*cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
*cs++ = fb->pitches[0];
*cs++ = intel_crtc->flip_work->gtt_offset;
*cs++ = 0; /* aux display base address, unused */
return 0;
}
static int intel_gen3_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_request *req,
uint32_t flags)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 flip_mask, *cs;
cs = intel_ring_begin(req, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
if (intel_crtc->plane)
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
*cs++ = MI_WAIT_FOR_EVENT | flip_mask;
*cs++ = MI_NOOP;
*cs++ = MI_DISPLAY_FLIP_I915 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
*cs++ = fb->pitches[0];
*cs++ = intel_crtc->flip_work->gtt_offset;
*cs++ = MI_NOOP;
return 0;
}
static int intel_gen4_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_request *req,
uint32_t flags)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 pf, pipesrc, *cs;
cs = intel_ring_begin(req, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
/* i965+ uses the linear or tiled offsets from the
* Display Registers (which do not change across a page-flip)
* so we need only reprogram the base address.
*/
*cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
*cs++ = fb->pitches[0];
*cs++ = intel_crtc->flip_work->gtt_offset |
intel_fb_modifier_to_tiling(fb->modifier);
/* XXX Enabling the panel-fitter across page-flip is so far
* untested on non-native modes, so ignore it for now.
* pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
*/
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
*cs++ = pf | pipesrc;
return 0;
}
static int intel_gen6_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_request *req,
uint32_t flags)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 pf, pipesrc, *cs;
cs = intel_ring_begin(req, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
*cs++ = fb->pitches[0] | intel_fb_modifier_to_tiling(fb->modifier);
*cs++ = intel_crtc->flip_work->gtt_offset;
/* Contrary to the suggestions in the documentation,
* "Enable Panel Fitter" does not seem to be required when page
* flipping with a non-native mode, and worse causes a normal
* modeset to fail.
* pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
*/
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
*cs++ = pf | pipesrc;
return 0;
}
static int intel_gen7_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_request *req,
uint32_t flags)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 *cs, plane_bit = 0;
int len, ret;
switch (intel_crtc->plane) {
case PLANE_A:
plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
break;
case PLANE_B:
plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
break;
case PLANE_C:
plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
break;
default:
WARN_ONCE(1, "unknown plane in flip command\n");
return -ENODEV;
}
len = 4;
if (req->engine->id == RCS) {
len += 6;
/*
* On Gen 8, SRM is now taking an extra dword to accommodate
* 48bits addresses, and we need a NOOP for the batch size to
* stay even.
*/
if (IS_GEN8(dev_priv))
len += 2;
}
/*
* BSpec MI_DISPLAY_FLIP for IVB:
* "The full packet must be contained within the same cache line."
*
* Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
* cacheline, if we ever start emitting more commands before
* the MI_DISPLAY_FLIP we may need to first emit everything else,
* then do the cacheline alignment, and finally emit the
* MI_DISPLAY_FLIP.
*/
ret = intel_ring_cacheline_align(req);
if (ret)
return ret;
cs = intel_ring_begin(req, len);
if (IS_ERR(cs))
return PTR_ERR(cs);
/* Unmask the flip-done completion message. Note that the bspec says that
* we should do this for both the BCS and RCS, and that we must not unmask
* more than one flip event at any time (or ensure that one flip message
* can be sent by waiting for flip-done prior to queueing new flips).
* Experimentation says that BCS works despite DERRMR masking all
* flip-done completion events and that unmasking all planes at once
* for the RCS also doesn't appear to drop events. Setting the DERRMR
* to zero does lead to lockups within MI_DISPLAY_FLIP.
*/
if (req->engine->id == RCS) {
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(DERRMR);
*cs++ = ~(DERRMR_PIPEA_PRI_FLIP_DONE |
DERRMR_PIPEB_PRI_FLIP_DONE |
DERRMR_PIPEC_PRI_FLIP_DONE);
if (IS_GEN8(dev_priv))
*cs++ = MI_STORE_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT;
else
*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
*cs++ = i915_mmio_reg_offset(DERRMR);
*cs++ = i915_ggtt_offset(req->engine->scratch) + 256;
if (IS_GEN8(dev_priv)) {
*cs++ = 0;
*cs++ = MI_NOOP;
}
}
*cs++ = MI_DISPLAY_FLIP_I915 | plane_bit;
*cs++ = fb->pitches[0] | intel_fb_modifier_to_tiling(fb->modifier);
*cs++ = intel_crtc->flip_work->gtt_offset;
*cs++ = MI_NOOP;
return 0;
}
static bool use_mmio_flip(struct intel_engine_cs *engine,
struct drm_i915_gem_object *obj)
{
/*
* This is not being used for older platforms, because
* non-availability of flip done interrupt forces us to use
* CS flips. Older platforms derive flip done using some clever
* tricks involving the flip_pending status bits and vblank irqs.
* So using MMIO flips there would disrupt this mechanism.
*/
if (engine == NULL)
return true;
if (INTEL_GEN(engine->i915) < 5)
return false;
if (i915.use_mmio_flip < 0)
return false;
else if (i915.use_mmio_flip > 0)
return true;
else if (i915.enable_execlists)
return true;
return engine != i915_gem_object_last_write_engine(obj);
}
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
unsigned int rotation,
struct intel_flip_work *work)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
const enum pipe pipe = intel_crtc->pipe;
u32 ctl, stride = skl_plane_stride(fb, 0, rotation);
ctl = I915_READ(PLANE_CTL(pipe, 0));
ctl &= ~PLANE_CTL_TILED_MASK;
switch (fb->modifier) {
case DRM_FORMAT_MOD_LINEAR:
break;
case I915_FORMAT_MOD_X_TILED:
ctl |= PLANE_CTL_TILED_X;
break;
case I915_FORMAT_MOD_Y_TILED:
ctl |= PLANE_CTL_TILED_Y;
break;
case I915_FORMAT_MOD_Yf_TILED:
ctl |= PLANE_CTL_TILED_YF;
break;
default:
MISSING_CASE(fb->modifier);
}
/*
* Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
* PLANE_SURF updates, the update is then guaranteed to be atomic.
*/
I915_WRITE(PLANE_CTL(pipe, 0), ctl);
I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
POSTING_READ(PLANE_SURF(pipe, 0));
}
static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
struct intel_flip_work *work)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
i915_reg_t reg = DSPCNTR(intel_crtc->plane);
u32 dspcntr;
dspcntr = I915_READ(reg);
if (fb->modifier == I915_FORMAT_MOD_X_TILED)
dspcntr |= DISPPLANE_TILED;
else
dspcntr &= ~DISPPLANE_TILED;
I915_WRITE(reg, dspcntr);
I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
POSTING_READ(DSPSURF(intel_crtc->plane));
}
static void intel_mmio_flip_work_func(struct work_struct *w)
{
struct intel_flip_work *work =
container_of(w, struct intel_flip_work, mmio_work);
struct intel_crtc *crtc = to_intel_crtc(work->crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_framebuffer *intel_fb =
to_intel_framebuffer(crtc->base.primary->fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
WARN_ON(i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT, NULL) < 0);
intel_pipe_update_start(crtc);
if (INTEL_GEN(dev_priv) >= 9)
skl_do_mmio_flip(crtc, work->rotation, work);
else
/* use_mmio_flip() restricts MMIO flips to ilk+ */
ilk_do_mmio_flip(crtc, work);
intel_pipe_update_end(crtc, work);
}
static int intel_default_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_request *req,
uint32_t flags)
{
return -ENODEV;
}
static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
struct intel_crtc *intel_crtc,
struct intel_flip_work *work)
@@ -10738,251 +10355,6 @@ void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
spin_unlock(&dev->event_lock);
}
__maybe_unused
static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
uint32_t page_flip_flags)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_framebuffer *old_fb = crtc->primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_plane *primary = crtc->primary;
enum pipe pipe = intel_crtc->pipe;
struct intel_flip_work *work;
struct intel_engine_cs *engine;
bool mmio_flip;
struct drm_i915_gem_request *request;
struct i915_vma *vma;
int ret;
/*
* drm_mode_page_flip_ioctl() should already catch this, but double
* check to be safe. In the future we may enable pageflipping from
* a disabled primary plane.
*/
if (WARN_ON(intel_fb_obj(old_fb) == NULL))
return -EBUSY;
/* Can't change pixel format via MI display flips. */
if (fb->format != crtc->primary->fb->format)
return -EINVAL;
/*
* TILEOFF/LINOFF registers can't be changed via MI display flips.
* Note that pitch changes could also affect these registers.
*/
if (INTEL_GEN(dev_priv) > 3 &&
(fb->offsets[0] != crtc->primary->fb->offsets[0] ||
fb->pitches[0] != crtc->primary->fb->pitches[0]))
return -EINVAL;
if (i915_terminally_wedged(&dev_priv->gpu_error))
goto out_hang;
work = kzalloc(sizeof(*work), GFP_KERNEL);
if (work == NULL)
return -ENOMEM;
work->event = event;
work->crtc = crtc;
work->old_fb = old_fb;
INIT_WORK(&work->unpin_work, intel_unpin_work_fn);
ret = drm_crtc_vblank_get(crtc);
if (ret)
goto free_work;
/* We borrow the event spin lock for protecting flip_work */
spin_lock_irq(&dev->event_lock);
if (intel_crtc->flip_work) {
/* Before declaring the flip queue wedged, check if
* the hardware completed the operation behind our backs.
*/
if (pageflip_finished(intel_crtc, intel_crtc->flip_work)) {
DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
page_flip_completed(intel_crtc);
} else {
DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
spin_unlock_irq(&dev->event_lock);
drm_crtc_vblank_put(crtc);
kfree(work);
return -EBUSY;
}
}
intel_crtc->flip_work = work;
spin_unlock_irq(&dev->event_lock);
if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
flush_workqueue(dev_priv->wq);
/* Reference the objects for the scheduled work. */
drm_framebuffer_reference(work->old_fb);
crtc->primary->fb = fb;
update_state_fb(crtc->primary);
work->pending_flip_obj = i915_gem_object_get(obj);
ret = i915_mutex_lock_interruptible(dev);
if (ret)
goto cleanup;
intel_crtc->reset_count = i915_reset_count(&dev_priv->gpu_error);
if (i915_reset_backoff_or_wedged(&dev_priv->gpu_error)) {
ret = -EIO;
goto unlock;
}
atomic_inc(&intel_crtc->unpin_work_count);
if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
engine = dev_priv->engine[BCS];
if (fb->modifier != old_fb->modifier)
/* vlv: DISPLAY_FLIP fails to change tiling */
engine = NULL;
} else if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
engine = dev_priv->engine[BCS];
} else if (INTEL_GEN(dev_priv) >= 7) {
engine = i915_gem_object_last_write_engine(obj);
if (engine == NULL || engine->id != RCS)
engine = dev_priv->engine[BCS];
} else {
engine = dev_priv->engine[RCS];
}
mmio_flip = use_mmio_flip(engine, obj);
vma = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto cleanup_pending;
}
work->old_vma = to_intel_plane_state(primary->state)->vma;
to_intel_plane_state(primary->state)->vma = vma;
work->gtt_offset = i915_ggtt_offset(vma) + intel_crtc->dspaddr_offset;
work->rotation = crtc->primary->state->rotation;
/*
* There's the potential that the next frame will not be compatible with
* FBC, so we want to call pre_update() before the actual page flip.
* The problem is that pre_update() caches some information about the fb
* object, so we want to do this only after the object is pinned. Let's
* be on the safe side and do this immediately before scheduling the
* flip.
*/
intel_fbc_pre_update(intel_crtc, intel_crtc->config,
to_intel_plane_state(primary->state));
if (mmio_flip) {
INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
queue_work(system_unbound_wq, &work->mmio_work);
} else {
request = i915_gem_request_alloc(engine,
dev_priv->kernel_context);
if (IS_ERR(request)) {
ret = PTR_ERR(request);
goto cleanup_unpin;
}
ret = i915_gem_request_await_object(request, obj, false);
if (ret)
goto cleanup_request;
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
page_flip_flags);
if (ret)
goto cleanup_request;
intel_mark_page_flip_active(intel_crtc, work);
work->flip_queued_req = i915_gem_request_get(request);
i915_add_request(request);
}
i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
i915_gem_track_fb(intel_fb_obj(old_fb), obj,
to_intel_plane(primary)->frontbuffer_bit);
mutex_unlock(&dev->struct_mutex);
intel_frontbuffer_flip_prepare(to_i915(dev),
to_intel_plane(primary)->frontbuffer_bit);
trace_i915_flip_request(intel_crtc->plane, obj);
return 0;
cleanup_request:
i915_add_request(request);
cleanup_unpin:
to_intel_plane_state(primary->state)->vma = work->old_vma;
intel_unpin_fb_vma(vma);
cleanup_pending:
atomic_dec(&intel_crtc->unpin_work_count);
unlock:
mutex_unlock(&dev->struct_mutex);
cleanup:
crtc->primary->fb = old_fb;
update_state_fb(crtc->primary);
i915_gem_object_put(obj);
drm_framebuffer_unreference(work->old_fb);
spin_lock_irq(&dev->event_lock);
intel_crtc->flip_work = NULL;
spin_unlock_irq(&dev->event_lock);
drm_crtc_vblank_put(crtc);
free_work:
kfree(work);
if (ret == -EIO) {
struct drm_atomic_state *state;
struct drm_plane_state *plane_state;
out_hang:
state = drm_atomic_state_alloc(dev);
if (!state)
return -ENOMEM;
state->acquire_ctx = dev->mode_config.acquire_ctx;
retry:
plane_state = drm_atomic_get_plane_state(state, primary);
ret = PTR_ERR_OR_ZERO(plane_state);
if (!ret) {
drm_atomic_set_fb_for_plane(plane_state, fb);
ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
if (!ret)
ret = drm_atomic_commit(state);
}
if (ret == -EDEADLK) {
drm_modeset_backoff(state->acquire_ctx);
drm_atomic_state_clear(state);
goto retry;
}
drm_atomic_state_put(state);
if (ret == 0 && event) {
spin_lock_irq(&dev->event_lock);
drm_crtc_send_vblank_event(crtc, event);
spin_unlock_irq(&dev->event_lock);
}
}
return ret;
}
/**
* intel_wm_need_update - Check whether watermarks need updating
* @plane: drm plane
@@ -14727,34 +14099,6 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.update_crtcs = skl_update_crtcs;
else
dev_priv->display.update_crtcs = intel_update_crtcs;
switch (INTEL_INFO(dev_priv)->gen) {
case 2:
dev_priv->display.queue_flip = intel_gen2_queue_flip;
break;
case 3:
dev_priv->display.queue_flip = intel_gen3_queue_flip;
break;
case 4:
case 5:
dev_priv->display.queue_flip = intel_gen4_queue_flip;
break;
case 6:
dev_priv->display.queue_flip = intel_gen6_queue_flip;
break;
case 7:
case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
dev_priv->display.queue_flip = intel_gen7_queue_flip;
break;
case 9:
/* Drop through - unsupported since execlist only. */
default:
/* Default just returns -ENODEV to indicate unsupported */
dev_priv->display.queue_flip = intel_default_queue_flip;
}
}
/*
...