Commit 5251f04e authored by Maarten Lankhorst

drm/i915: Remove intel_prepare_page_flip, v3.

Instead of calling prepare_page_flip right before finish_page_flip,
do everything prepare_page_flip did from finish_page_flip.

Putting prepare and finish page_flip in a single step removes the need
for INTEL_FLIP_COMPLETE, so that state can be dropped. This simplifies
the code slightly.
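
For readers skimming the diff below, the effect is easiest to see as a small state-machine change: work->pending shrinks from a three-state value (INACTIVE/PENDING/COMPLETE) to a simple armed/not-armed flag, and the old prepare-then-finish pair becomes one completion step. The following is a minimal, standalone C11 sketch of that pattern, not driver code: the struct and function names are illustrative, and C11 acquire/release ordering stands in for the kernel's smp_mb__before_atomic()/smp_rmb() barriers used in the real patch.

/*
 * Standalone model of the flip tracking after this patch: a boolean
 * "pending" flag set when a flip is queued, and a single completion
 * routine that does what prepare_page_flip + finish_page_flip used to
 * do in two steps.  Not driver code; names are illustrative only.
 */
#include <stdatomic.h>
#include <stdio.h>

struct flip_work {
        atomic_int pending;      /* 0 = idle, 1 = flip queued */
        unsigned int flip_count;
};

/* Queueing side: publish the work item, then mark the flip active. */
static void mark_page_flip_active(struct flip_work *work)
{
        /* release ordering plays the role of smp_mb__before_atomic() */
        atomic_store_explicit(&work->pending, 1, memory_order_release);
}

/* Completion side: one step replaces the old prepare + finish pair. */
static void finish_page_flip(struct flip_work *work)
{
        /* acquire ordering plays the role of the smp_rmb() in the patch */
        if (!atomic_load_explicit(&work->pending, memory_order_acquire))
                return; /* ignore early/spurious completion interrupts */

        printf("completing flip %u\n", work->flip_count);
        atomic_store_explicit(&work->pending, 0, memory_order_relaxed);
}

int main(void)
{
        struct flip_work work = { .flip_count = 1 };

        finish_page_flip(&work);      /* nothing pending: ignored */
        mark_page_flip_active(&work);
        finish_page_flip(&work);      /* completes the queued flip */
        return 0;
}

In the driver itself the completion side additionally takes dev->event_lock and rechecks page_flip_finished(), as the intel_finish_page_flip hunk below shows.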

Changes since v1:
- Invert if case to simplify code.
- Add missing barrier.
- Reword commit message.
Changes since v2:
- intel_page_flip_plane is removed.
- work->pending is turned into a bool.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1463490484-19540-5-git-send-email-maarten.lankhorst@linux.intel.com
Reviewed-by: Patrik Jakobsson <patrik.jakobsson@linux.intel.com>
Parent ef58319d
@@ -619,12 +619,9 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
                         u32 addr;
 
                         pending = atomic_read(&work->pending);
-                        if (pending == INTEL_FLIP_INACTIVE) {
-                                seq_printf(m, "Flip ioctl preparing on pipe %c (plane %c)\n",
-                                           pipe, plane);
-                        } else if (pending >= INTEL_FLIP_COMPLETE) {
+                        if (pending) {
                                 seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
                                            pipe, plane);
                         } else {
                                 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
                                            pipe, plane);
@@ -1705,10 +1705,8 @@ static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
                     intel_pipe_handle_vblank(dev_priv, pipe))
                         intel_check_page_flip(dev_priv, pipe);
 
-                if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
-                        intel_prepare_page_flip(dev_priv, pipe);
+                if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
                         intel_finish_page_flip(dev_priv, pipe);
-                }
 
                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
@@ -2162,10 +2160,8 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
 
                 /* plane/pipes map 1:1 on ilk+ */
-                if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
-                        intel_prepare_page_flip(dev_priv, pipe);
+                if (de_iir & DE_PLANE_FLIP_DONE(pipe))
                         intel_finish_page_flip(dev_priv, pipe);
-                }
         }
 
         /* check event from PCH */
@@ -2209,10 +2205,8 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
                         intel_check_page_flip(dev_priv, pipe);
 
                 /* plane/pipes map 1:1 on ilk+ */
-                if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
-                        intel_prepare_page_flip(dev_priv, pipe);
+                if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
                         intel_finish_page_flip(dev_priv, pipe);
-                }
         }
 
         /* check event from PCH */
@@ -2417,10 +2411,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
                 else
                         flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;
 
-                if (flip_done) {
-                        intel_prepare_page_flip(dev_priv, pipe);
+                if (flip_done)
                         intel_finish_page_flip(dev_priv, pipe);
-                }
 
                 if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
                         hsw_pipe_crc_irq_handler(dev_priv, pipe);
@@ -3998,7 +3990,6 @@ static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv,
         if (I915_READ16(ISR) & flip_pending)
                 goto check_page_flip;
 
-        intel_prepare_page_flip(dev_priv, plane);
         intel_finish_page_flip(dev_priv, pipe);
 
         return true;
@@ -4188,7 +4179,6 @@ static bool i915_handle_vblank(struct drm_i915_private *dev_priv,
         if (I915_READ(ISR) & flip_pending)
                 goto check_page_flip;
 
-        intel_prepare_page_flip(dev_priv, plane);
         intel_finish_page_flip(dev_priv, pipe);
 
         return true;
@@ -3112,10 +3112,8 @@ static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
 {
         struct intel_crtc *crtc;
 
-        for_each_intel_crtc(dev_priv->dev, crtc) {
-                intel_prepare_page_flip(dev_priv, crtc->plane);
+        for_each_intel_crtc(dev_priv->dev, crtc)
                 intel_finish_page_flip(dev_priv, crtc->pipe);
-        }
 }
 
 static void intel_update_primary_planes(struct drm_device *dev)
@@ -10866,42 +10864,6 @@ static void intel_unpin_work_fn(struct work_struct *__work)
         kfree(work);
 }
 
-static void do_intel_finish_page_flip(struct drm_i915_private *dev_priv,
-                                      struct drm_crtc *crtc)
-{
-        struct drm_device *dev = dev_priv->dev;
-        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-        struct intel_unpin_work *work;
-        unsigned long flags;
-
-        /* Ignore early vblank irqs */
-        if (intel_crtc == NULL)
-                return;
-
-        /*
-         * This is called both by irq handlers and the reset code (to complete
-         * lost pageflips) so needs the full irqsave spinlocks.
-         */
-        spin_lock_irqsave(&dev->event_lock, flags);
-        work = intel_crtc->unpin_work;
-
-        if (work && atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE) {
-                /* ensure that the unpin work is consistent wrt ->pending. */
-                smp_rmb();
-
-                page_flip_completed(intel_crtc);
-        }
-
-        spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
-void intel_finish_page_flip(struct drm_i915_private *dev_priv, int pipe)
-{
-        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
-
-        do_intel_finish_page_flip(dev_priv, crtc);
-}
-
 /* Is 'a' after or equal to 'b'? */
 static bool g4x_flip_count_after_eq(u32 a, u32 b)
 {
@@ -10914,6 +10876,9 @@ static bool page_flip_finished(struct intel_crtc *crtc)
         struct drm_i915_private *dev_priv = dev->dev_private;
         unsigned reset_counter;
 
+        /* ensure that the unpin work is consistent wrt ->pending. */
+        smp_rmb();
+
         reset_counter = i915_reset_counter(&dev_priv->gpu_error);
         if (crtc->reset_counter != reset_counter)
                 return true;
@@ -10955,25 +10920,30 @@ static bool page_flip_finished(struct intel_crtc *crtc)
                                     crtc->unpin_work->flip_count);
 }
 
-void intel_prepare_page_flip(struct drm_i915_private *dev_priv, int plane)
+void intel_finish_page_flip(struct drm_i915_private *dev_priv, int pipe)
 {
         struct drm_device *dev = dev_priv->dev;
-        struct intel_crtc *intel_crtc =
-                to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
+        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+        struct intel_unpin_work *work;
         unsigned long flags;
 
+        /* Ignore early vblank irqs */
+        if (!crtc)
+                return;
 
         /*
          * This is called both by irq handlers and the reset code (to complete
          * lost pageflips) so needs the full irqsave spinlocks.
-         *
-         * NB: An MMIO update of the plane base pointer will also
-         * generate a page-flip completion irq, i.e. every modeset
-         * is also accompanied by a spurious intel_prepare_page_flip().
          */
         spin_lock_irqsave(&dev->event_lock, flags);
-        if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
-                atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
+        work = intel_crtc->unpin_work;
+
+        if (work != NULL &&
+            atomic_read(&work->pending) &&
+            page_flip_finished(intel_crtc))
+                page_flip_completed(intel_crtc);
+
         spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
@@ -10981,7 +10951,7 @@ static inline void intel_mark_page_flip_active(struct intel_unpin_work *work)
 {
         /* Ensure that the work item is consistent when activating it ... */
         smp_mb__before_atomic();
-        atomic_set(&work->pending, INTEL_FLIP_PENDING);
+        atomic_set(&work->pending, 1);
 }
 
 static int intel_gen2_queue_flip(struct drm_device *dev,
@@ -11421,8 +11391,8 @@ static bool __intel_pageflip_stall_check(struct drm_device *dev,
         /* ensure that the unpin work is consistent wrt ->pending. */
         smp_rmb();
 
-        if (pending != INTEL_FLIP_PENDING)
-                return pending == INTEL_FLIP_COMPLETE;
+        if (!pending)
+                return false;
 
         if (work->flip_ready_vblank == 0) {
                 if (work->flip_queued_req &&
@@ -984,9 +984,6 @@ struct intel_unpin_work {
         struct drm_i915_gem_object *pending_flip_obj;
         struct drm_pending_vblank_event *event;
         atomic_t pending;
-#define INTEL_FLIP_INACTIVE 0
-#define INTEL_FLIP_PENDING 1
-#define INTEL_FLIP_COMPLETE 2
         u32 flip_count;
         u32 gtt_offset;
         struct drm_i915_gem_request *flip_queued_req;
@@ -1199,7 +1196,6 @@ struct drm_framebuffer *
 __intel_framebuffer_create(struct drm_device *dev,
                            struct drm_mode_fb_cmd2 *mode_cmd,
                            struct drm_i915_gem_object *obj);
-void intel_prepare_page_flip(struct drm_i915_private *dev_priv, int plane);
 void intel_finish_page_flip(struct drm_i915_private *dev_priv, int pipe);
 void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe);
 int intel_prepare_plane_fb(struct drm_plane *plane,