Commit b2cfe0ab authored by Chris Wilson, committed by Daniel Vetter

drm/i915: Fix race on unreferencing the wrong mmio-flip-request

As we perform the mmio-flip without any locking and then try to acquire
the struct_mutex prior to dereferencing the request, it is possible for
userspace to queue a new pageflip before the worker can finish clearing
the old state - and then it will clear the new flip request. The result
is that the new flip could be completed before the GPU has finished
rendering.

The bug stems from removing the seqno checking in
commit 536f5b5e
Author: Ander Conselvan de Oliveira <ander.conselvan.de.oliveira@intel.com>
Date:   Thu Nov 6 11:03:40 2014 +0200

    drm/i915: Make mmio flip wait for seqno in the work function
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Ander Conselvan de Oliveira <ander.conselvan.de.oliveira@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Parent d94b5030
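For readers following the diff below, here is a minimal reconstruction of the race being fixed. The interleaving is inferred from the commit message; the identifiers match the pre-patch code, but the exact timeline is illustrative:

/*
 * Illustrative interleaving (simplified):
 *
 *   ioctl, flip #1:  crtc->mmio_flip.req = req1;  schedule_work();
 *   worker:          __i915_wait_request(crtc->mmio_flip.req, ...);
 *   ioctl, flip #2:  crtc->mmio_flip.req = req2;  <-- overwrites req1
 *   worker:          mutex_lock(&dev->struct_mutex);
 *                    i915_gem_request_assign(&crtc->mmio_flip.req, NULL);
 *                    <-- clears req2, the *new* flip's request, so flip #2
 *                        can complete before the GPU finishes rendering
 *                    mutex_unlock(&dev->struct_mutex);
 *
 * The fix removes the shared per-CRTC slot entirely: each flip allocates
 * its own struct intel_mmio_flip, takes its own reference on the request,
 * and the worker frees both when it is done.
 */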
@@ -2161,10 +2161,12 @@ i915_gem_request_get_ring(struct drm_i915_gem_request *req)
 	return req ? req->ring : NULL;
 }
 
-static inline void
+static inline struct drm_i915_gem_request *
 i915_gem_request_reference(struct drm_i915_gem_request *req)
 {
-	kref_get(&req->ref);
+	if (req)
+		kref_get(&req->ref);
+	return req;
 }
 
 static inline void
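The hunk above changes the reference helper from returning void to returning its argument, and makes it tolerate NULL. A minimal self-contained sketch of that idiom follows; "struct obj" and "obj_get" are hypothetical names for illustration, not driver code:

#include <linux/kref.h>

/*
 * A "get" that tolerates NULL and hands the pointer back lets callers
 * take a reference and assign in a single expression, e.g.
 *
 *	flip->rq = obj_get(source->last_obj);
 *
 * even when source->last_obj may be NULL.
 */
struct obj {
	struct kref ref;
};

static inline struct obj *obj_get(struct obj *o)
{
	if (o)
		kref_get(&o->ref);
	return o;
}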
@@ -10793,22 +10793,18 @@ static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
 
 static void intel_mmio_flip_work_func(struct work_struct *work)
 {
-	struct intel_crtc *crtc =
-		container_of(work, struct intel_crtc, mmio_flip.work);
-	struct intel_mmio_flip *mmio_flip;
+	struct intel_mmio_flip *mmio_flip =
+		container_of(work, struct intel_mmio_flip, work);
 
-	mmio_flip = &crtc->mmio_flip;
-	if (mmio_flip->req)
-		WARN_ON(__i915_wait_request(mmio_flip->req,
-					    crtc->reset_counter,
-					    false, NULL, NULL) != 0);
+	if (mmio_flip->rq)
+		WARN_ON(__i915_wait_request(mmio_flip->rq,
+					    mmio_flip->crtc->reset_counter,
+					    false, NULL, NULL));
 
-	intel_do_mmio_flip(crtc);
-	if (mmio_flip->req) {
-		mutex_lock(&crtc->base.dev->struct_mutex);
-		i915_gem_request_assign(&mmio_flip->req, NULL);
-		mutex_unlock(&crtc->base.dev->struct_mutex);
-	}
+	intel_do_mmio_flip(mmio_flip->crtc);
+
+	i915_gem_request_unreference__unlocked(mmio_flip->rq);
+	kfree(mmio_flip);
 }
 
 static int intel_queue_mmio_flip(struct drm_device *dev,
@@ -10818,12 +10814,17 @@ static int intel_queue_mmio_flip(struct drm_device *dev,
 				 struct intel_engine_cs *ring,
 				 uint32_t flags)
 {
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_mmio_flip *mmio_flip;
+
+	mmio_flip = kmalloc(sizeof(*mmio_flip), GFP_KERNEL);
+	if (mmio_flip == NULL)
+		return -ENOMEM;
 
-	i915_gem_request_assign(&intel_crtc->mmio_flip.req,
-				obj->last_write_req);
+	mmio_flip->rq = i915_gem_request_reference(obj->last_write_req);
+	mmio_flip->crtc = to_intel_crtc(crtc);
 
-	schedule_work(&intel_crtc->mmio_flip.work);
+	INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func);
+	schedule_work(&mmio_flip->work);
 
 	return 0;
 }
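The two hunks above replace the single work_struct embedded in intel_crtc with a work item allocated per flip, initialized with INIT_WORK at queue time and freed by the worker itself. A hedged sketch of that general pattern, using hypothetical names (flip_work, queue_flip), not the driver's own:

#include <linux/slab.h>
#include <linux/workqueue.h>

/* Hypothetical per-request work item, mirroring the pattern above. */
struct flip_work {
	struct work_struct work;
	void *payload;		/* state the worker will consume */
};

static void flip_work_func(struct work_struct *work)
{
	struct flip_work *fw = container_of(work, struct flip_work, work);

	/* ... wait on / act upon fw->payload ... */

	kfree(fw);		/* the work item owns itself; free it last */
}

static int queue_flip(void *payload)
{
	struct flip_work *fw = kmalloc(sizeof(*fw), GFP_KERNEL);

	if (fw == NULL)
		return -ENOMEM;

	fw->payload = payload;
	INIT_WORK(&fw->work, flip_work_func);
	schedule_work(&fw->work);
	return 0;
}

Because each flip carries its own state and its own reference, a second flip queued while the worker is still running can no longer clobber the request the worker is about to wait on and drop.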
@@ -13564,8 +13565,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
 	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
 	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
 
-	INIT_WORK(&intel_crtc->mmio_flip.work, intel_mmio_flip_work_func);
-
 	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
 
 	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
@@ -459,8 +459,9 @@ struct intel_pipe_wm {
 };
 
 struct intel_mmio_flip {
-	struct drm_i915_gem_request *req;
 	struct work_struct work;
+	struct drm_i915_gem_request *rq;
+	struct intel_crtc *crtc;
 };
 
 struct skl_pipe_wm {
@@ -544,7 +545,6 @@ struct intel_crtc {
 	} wm;
 
 	int scanline_offset;
-	struct intel_mmio_flip mmio_flip;
 
 	struct intel_crtc_atomic_commit atomic;