Commit bd95e0a4 authored by Eric Anholt, committed by Dave Airlie

i915: Remove racy delayed vblank swap ioctl.

When userland detected that this ioctl was supported (by version number check),
it used it in a racy way -- dispatch delayed swap, wait for vblank, continue
rendering. As there was no mechanism for it to wait for the swap to finish,
sometimes it would render before the swap and garbage would be displayed on
the screen.

By removing the ioctl and returning -EINVAL, userland returns to its previous,
correct rendering path of waiting for a vblank then dispatching a swap.  The
only path that could have used this ioctl correctly was page flipping, which
relied on only one client running and emitting wait-for-vblank-before-rendering
in the command stream.  That path also falls back correctly, at the performance
cost of not being able to queue up rendering before the flip occurs.
Signed-off-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Dave Airlie <airlied@linux.ie>
Parent d3e74d02
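For context, the non-racy client path the commit message describes -- wait for the vblank first, then emit the swap yourself -- looks roughly like the sketch below, written against libdrm's drmWaitVBlank(). emit_swap_blit() is a hypothetical stand-in for the client's own back-to-front blit submission.

    /* Minimal sketch of the correct client-side path: block until the next
     * vblank, then perform the back-to-front copy ourselves. */
    #include <string.h>
    #include <xf86drm.h>

    extern void emit_swap_blit(void);   /* hypothetical: client's own blit */

    static int wait_vblank_then_swap(int drm_fd)
    {
            drmVBlank vbl;
            int ret;

            memset(&vbl, 0, sizeof(vbl));
            vbl.request.type = DRM_VBLANK_RELATIVE;  /* next vblank from now */
            vbl.request.sequence = 1;

            ret = drmWaitVBlank(drm_fd, &vbl);       /* blocks in the kernel */
            if (ret != 0)
                    return ret;

            emit_swap_blit();                        /* swap happens only after the wait */
            return 0;
    }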
@@ -88,13 +88,6 @@ struct mem_block {
struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
};
typedef struct _drm_i915_vbl_swap {
struct list_head head;
drm_drawable_t drw_id;
unsigned int pipe;
unsigned int sequence;
} drm_i915_vbl_swap_t;
struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
@@ -146,10 +139,6 @@ typedef struct drm_i915_private {
unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
int vblank_pipe;
spinlock_t swaps_lock;
drm_i915_vbl_swap_t vbl_swaps;
unsigned int swaps_pending;
struct intel_opregion opregion;
/* Register state */
@@ -242,9 +231,6 @@ typedef struct drm_i915_private {
u8 saveDACDATA[256*3]; /* 256 3-byte colors */
u8 saveCR[37];
/** Work task for vblank-related ring access */
struct work_struct vblank_work;
struct {
struct drm_mm gtt_space;
@@ -445,7 +431,6 @@ extern int i915_irq_wait(struct drm_device *dev, void *data,
void i915_user_irq_get(struct drm_device *dev);
void i915_user_irq_put(struct drm_device *dev);
extern void i915_vblank_work_handler(struct work_struct *work);
extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
extern void i915_driver_irq_preinstall(struct drm_device * dev);
extern int i915_driver_irq_postinstall(struct drm_device *dev);
@@ -80,211 +80,6 @@ i915_pipe_enabled(struct drm_device *dev, int pipe)
return 0;
}
/**
* Emit blits for scheduled buffer swaps.
*
* This function will be called with the HW lock held.
* Because this function must grab the ring mutex (dev->struct_mutex),
* it can no longer run at soft irq time. We'll fix this when we do
* the DRI2 swap buffer work.
*/
static void i915_vblank_tasklet(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
struct list_head *list, *tmp, hits, *hit;
int nhits, nrects, slice[2], upper[2], lower[2], i;
unsigned counter[2];
struct drm_drawable_info *drw;
drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
u32 cpp = dev_priv->cpp;
u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
XY_SRC_COPY_BLT_WRITE_ALPHA |
XY_SRC_COPY_BLT_WRITE_RGB)
: XY_SRC_COPY_BLT_CMD;
u32 src_pitch = sarea_priv->pitch * cpp;
u32 dst_pitch = sarea_priv->pitch * cpp;
u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24);
RING_LOCALS;
mutex_lock(&dev->struct_mutex);
if (IS_I965G(dev) && sarea_priv->front_tiled) {
cmd |= XY_SRC_COPY_BLT_DST_TILED;
dst_pitch >>= 2;
}
if (IS_I965G(dev) && sarea_priv->back_tiled) {
cmd |= XY_SRC_COPY_BLT_SRC_TILED;
src_pitch >>= 2;
}
counter[0] = drm_vblank_count(dev, 0);
counter[1] = drm_vblank_count(dev, 1);
DRM_DEBUG("\n");
INIT_LIST_HEAD(&hits);
nhits = nrects = 0;
spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
/* Find buffer swaps scheduled for this vertical blank */
list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
drm_i915_vbl_swap_t *vbl_swap =
list_entry(list, drm_i915_vbl_swap_t, head);
int pipe = vbl_swap->pipe;
if ((counter[pipe] - vbl_swap->sequence) > (1<<23))
continue;
list_del(list);
dev_priv->swaps_pending--;
drm_vblank_put(dev, pipe);
spin_unlock(&dev_priv->swaps_lock);
spin_lock(&dev->drw_lock);
drw = drm_get_drawable_info(dev, vbl_swap->drw_id);
list_for_each(hit, &hits) {
drm_i915_vbl_swap_t *swap_cmp =
list_entry(hit, drm_i915_vbl_swap_t, head);
struct drm_drawable_info *drw_cmp =
drm_get_drawable_info(dev, swap_cmp->drw_id);
/* Make sure both drawables are still
* around and have some rectangles before
* we look inside to order them for the
* blts below.
*/
if (drw_cmp && drw_cmp->num_rects > 0 &&
drw && drw->num_rects > 0 &&
drw_cmp->rects[0].y1 > drw->rects[0].y1) {
list_add_tail(list, hit);
break;
}
}
spin_unlock(&dev->drw_lock);
/* List of hits was empty, or we reached the end of it */
if (hit == &hits)
list_add_tail(list, hits.prev);
nhits++;
spin_lock(&dev_priv->swaps_lock);
}
if (nhits == 0) {
spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
mutex_unlock(&dev->struct_mutex);
return;
}
spin_unlock(&dev_priv->swaps_lock);
i915_kernel_lost_context(dev);
if (IS_I965G(dev)) {
BEGIN_LP_RING(4);
OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
OUT_RING(0);
OUT_RING(((sarea_priv->width - 1) & 0xffff) | ((sarea_priv->height - 1) << 16));
OUT_RING(0);
ADVANCE_LP_RING();
} else {
BEGIN_LP_RING(6);
OUT_RING(GFX_OP_DRAWRECT_INFO);
OUT_RING(0);
OUT_RING(0);
OUT_RING(sarea_priv->width | sarea_priv->height << 16);
OUT_RING(sarea_priv->width | sarea_priv->height << 16);
OUT_RING(0);
ADVANCE_LP_RING();
}
sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;
upper[0] = upper[1] = 0;
slice[0] = max(sarea_priv->pipeA_h / nhits, 1);
slice[1] = max(sarea_priv->pipeB_h / nhits, 1);
lower[0] = sarea_priv->pipeA_y + slice[0];
lower[1] = sarea_priv->pipeB_y + slice[0];
spin_lock(&dev->drw_lock);
/* Emit blits for buffer swaps, partitioning both outputs into as many
* slices as there are buffer swaps scheduled in order to avoid tearing
* (based on the assumption that a single buffer swap would always
* complete before scanout starts).
*/
for (i = 0; i++ < nhits;
upper[0] = lower[0], lower[0] += slice[0],
upper[1] = lower[1], lower[1] += slice[1]) {
if (i == nhits)
lower[0] = lower[1] = sarea_priv->height;
list_for_each(hit, &hits) {
drm_i915_vbl_swap_t *swap_hit =
list_entry(hit, drm_i915_vbl_swap_t, head);
struct drm_clip_rect *rect;
int num_rects, pipe;
unsigned short top, bottom;
drw = drm_get_drawable_info(dev, swap_hit->drw_id);
/* The drawable may have been destroyed since
* the vblank swap was queued
*/
if (!drw)
continue;
rect = drw->rects;
pipe = swap_hit->pipe;
top = upper[pipe];
bottom = lower[pipe];
for (num_rects = drw->num_rects; num_rects--; rect++) {
int y1 = max(rect->y1, top);
int y2 = min(rect->y2, bottom);
if (y1 >= y2)
continue;
BEGIN_LP_RING(8);
OUT_RING(cmd);
OUT_RING(ropcpp | dst_pitch);
OUT_RING((y1 << 16) | rect->x1);
OUT_RING((y2 << 16) | rect->x2);
OUT_RING(sarea_priv->front_offset);
OUT_RING((y1 << 16) | rect->x1);
OUT_RING(src_pitch);
OUT_RING(sarea_priv->back_offset);
ADVANCE_LP_RING();
}
}
}
spin_unlock_irqrestore(&dev->drw_lock, irqflags);
mutex_unlock(&dev->struct_mutex);
list_for_each_safe(hit, tmp, &hits) {
drm_i915_vbl_swap_t *swap_hit =
list_entry(hit, drm_i915_vbl_swap_t, head);
list_del(hit);
drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER);
}
}
/* Called from drm generic code, passed a 'crtc', which
* we use as a pipe index
*/
@@ -322,40 +117,6 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
return count;
}
void
i915_vblank_work_handler(struct work_struct *work)
{
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
vblank_work);
struct drm_device *dev = dev_priv->dev;
unsigned long irqflags;
if (dev->lock.hw_lock == NULL) {
i915_vblank_tasklet(dev);
return;
}
spin_lock_irqsave(&dev->tasklet_lock, irqflags);
dev->locked_tasklet_func = i915_vblank_tasklet;
spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
/* Try to get the lock now, if this fails, the lock
* holder will execute the tasklet during unlock
*/
if (!drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT))
return;
dev->lock.lock_time = jiffies;
atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
spin_lock_irqsave(&dev->tasklet_lock, irqflags);
dev->locked_tasklet_func = NULL;
spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
i915_vblank_tasklet(dev);
drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
}
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
@@ -433,9 +194,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
if (iir & I915_ASLE_INTERRUPT)
opregion_asle_intr(dev);
if (vblank && dev_priv->swaps_pending > 0)
schedule_work(&dev_priv->vblank_work);
return IRQ_HANDLED;
}
@@ -696,123 +454,21 @@ int i915_vblank_pipe_get(struct drm_device *dev, void *data,
int i915_vblank_swap(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_vblank_swap_t *swap = data;
drm_i915_vbl_swap_t *vbl_swap, *vbl_old;
unsigned int pipe, seqtype, curseq;
unsigned long irqflags;
struct list_head *list;
int ret;
if (!dev_priv || !dev_priv->sarea_priv) {
DRM_ERROR("%s called with no initialization\n", __func__);
return -EINVAL;
}
if (dev_priv->sarea_priv->rotation) {
DRM_DEBUG("Rotation not supported\n");
return -EINVAL;
}
if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
_DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) {
DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype);
return -EINVAL;
}
pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
if (!(dev_priv->vblank_pipe & (1 << pipe))) {
DRM_ERROR("Invalid pipe %d\n", pipe);
return -EINVAL;
}
spin_lock_irqsave(&dev->drw_lock, irqflags);
if (!drm_get_drawable_info(dev, swap->drawable)) {
spin_unlock_irqrestore(&dev->drw_lock, irqflags);
DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable);
return -EINVAL;
}
spin_unlock_irqrestore(&dev->drw_lock, irqflags);
/*
* We take the ref here and put it when the swap actually completes
* in the tasklet.
/* The delayed swap mechanism was fundamentally racy, and has been
* removed. The model was that the client requested a delayed flip/swap
* from the kernel, then waited for vblank before continuing to perform
* rendering. The problem was that the kernel might wake the client
* up before it dispatched the vblank swap (since the lock has to be
* held while touching the ringbuffer), in which case the client would
* clear and start the next frame before the swap occurred, and
* flicker would occur in addition to likely missing the vblank.
*
* In the absence of this ioctl, userland falls back to a correct path
* of waiting for a vblank, then dispatching the swap on its own.
* Context switching to userland and back is plenty fast enough for
* meeting the requirements of vblank swapping.
*/
ret = drm_vblank_get(dev, pipe);
if (ret)
return ret;
curseq = drm_vblank_count(dev, pipe);
if (seqtype == _DRM_VBLANK_RELATIVE)
swap->sequence += curseq;
if ((curseq - swap->sequence) <= (1<<23)) {
if (swap->seqtype & _DRM_VBLANK_NEXTONMISS) {
swap->sequence = curseq + 1;
} else {
DRM_DEBUG("Missed target sequence\n");
drm_vblank_put(dev, pipe);
return -EINVAL;
}
}
vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);
if (!vbl_swap) {
DRM_ERROR("Failed to allocate memory to queue swap\n");
drm_vblank_put(dev, pipe);
return -ENOMEM;
}
vbl_swap->drw_id = swap->drawable;
vbl_swap->pipe = pipe;
vbl_swap->sequence = swap->sequence;
spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
list_for_each(list, &dev_priv->vbl_swaps.head) {
vbl_old = list_entry(list, drm_i915_vbl_swap_t, head);
if (vbl_old->drw_id == swap->drawable &&
vbl_old->pipe == pipe &&
vbl_old->sequence == swap->sequence) {
spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
drm_vblank_put(dev, pipe);
drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
DRM_DEBUG("Already scheduled\n");
return 0;
}
}
if (dev_priv->swaps_pending >= 10) {
DRM_DEBUG("Too many swaps queued\n");
DRM_DEBUG(" pipe 0: %d pipe 1: %d\n",
drm_vblank_count(dev, 0),
drm_vblank_count(dev, 1));
list_for_each(list, &dev_priv->vbl_swaps.head) {
vbl_old = list_entry(list, drm_i915_vbl_swap_t, head);
DRM_DEBUG("\tdrw %x pipe %d seq %x\n",
vbl_old->drw_id, vbl_old->pipe,
vbl_old->sequence);
}
spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
drm_vblank_put(dev, pipe);
drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
return -EBUSY;
}
list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);
dev_priv->swaps_pending++;
spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
return 0;
return -EINVAL;
}
/* drm_dma.h hooks
@@ -831,11 +487,6 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret, num_pipes = 2;
spin_lock_init(&dev_priv->swaps_lock);
INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
INIT_WORK(&dev_priv->vblank_work, i915_vblank_work_handler);
dev_priv->swaps_pending = 0;
/* Set initial unmasked IRQs to just the selected vblank pipes. */
dev_priv->irq_mask_reg = ~0;
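With the ioctl stubbed out to return -EINVAL, a client that still issues DRM_IOCTL_I915_VBLANK_SWAP simply sees the call fail and can drop back to the wait-then-swap path sketched above (the shipping 3D drivers keyed off the DRM version number instead, per the commit message). A minimal, hypothetical probe, assuming the drm_i915_vblank_swap_t definition from i915_drm.h:

    /* Hypothetical probe: try the delayed-swap ioctl once; when the kernel
     * rejects it (now always -EINVAL), swap in the client instead. */
    #include <errno.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include "i915_drm.h"            /* from libdrm; include path varies */

    extern int wait_vblank_then_swap(int drm_fd);  /* sketch shown above the diff */

    static void schedule_or_fallback(int drm_fd, drm_drawable_t drawable)
    {
            drm_i915_vblank_swap_t swap;

            memset(&swap, 0, sizeof(swap));
            swap.drawable = drawable;
            swap.seqtype = _DRM_VBLANK_RELATIVE;
            swap.sequence = 1;

            if (ioctl(drm_fd, DRM_IOCTL_I915_VBLANK_SWAP, &swap) != 0) {
                    /* errno == EINVAL on kernels with this commit */
                    wait_vblank_then_swap(drm_fd);
            }
    }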