Commit 64171959 authored by Dave Airlie

drm/mgag200: deal with bo reserve fail in dirty update path

During F19 testing it was noticed that we get a lot of errors in dmesg
about being unable to reserve the buffer when plymouth starts. This is
due to the buffer being in the process of migrating, so it makes sense
that we can't reserve it.

To deal with this, add delayed handling of dirty updates for the case
where the bo is unreservable. In the normal console case this should
never happen; it only occurs when plymouth or X is pushing the console
bo to system memory.

Cc: stable@vger.kernel.org
Signed-off-by: Dave Airlie <airlied@redhat.com>
Parent: b11b88ef
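
The pattern the diff below implements (merge incoming damage into a stored rectangle under a lock, and only copy it out once the buffer can actually be reserved) can be illustrated with a small, self-contained userspace sketch. The names here (struct dirty_rect, damage_add), the pthread mutex, and the printf flush are illustrative stand-ins only, not the driver's API; the real code uses spin_lock_irqsave() on mfbdev->dirty_lock and a memcpy_toio() loop for the flush.

#include <limits.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct dirty_rect {
        int x1, y1, x2, y2;      /* accumulated damage, inclusive bounds */
        pthread_mutex_t lock;
};

/*
 * Merge a new damage rectangle into the stored one; if the buffer is
 * busy (cannot be reserved), just remember the union for a later call.
 */
static void damage_add(struct dirty_rect *d, int x, int y,
                       int width, int height, bool buffer_busy)
{
        int x2 = x + width - 1;
        int y2 = y + height - 1;

        pthread_mutex_lock(&d->lock);

        /* Grow the incoming rect to cover damage stored by earlier calls. */
        if (d->x1 < x)
                x = d->x1;
        if (d->y1 < y)
                y = d->y1;
        if (d->x2 > x2)
                x2 = d->x2;
        if (d->y2 > y2)
                y2 = d->y2;

        if (buffer_busy) {
                /* Can't touch the buffer now: store the union for later. */
                d->x1 = x;
                d->y1 = y;
                d->x2 = x2;
                d->y2 = y2;
                pthread_mutex_unlock(&d->lock);
                return;
        }

        /* About to flush everything, so reset the stored rect to "empty". */
        d->x1 = d->y1 = INT_MAX;
        d->x2 = d->y2 = 0;
        pthread_mutex_unlock(&d->lock);

        /* Stand-in for the real copy into the video BO. */
        printf("flush rows %d..%d, cols %d..%d\n", y, y2, x, x2);
}

int main(void)
{
        struct dirty_rect d = {
                .x1 = INT_MAX, .y1 = INT_MAX, .x2 = 0, .y2 = 0,
                .lock = PTHREAD_MUTEX_INITIALIZER,
        };

        damage_add(&d, 10, 10, 4, 4, true);   /* busy: damage is stored */
        damage_add(&d, 0, 0, 2, 2, false);    /* flushes the union 0..13 x 0..13 */
        return 0;
}

The INT_MAX/0 reset gives the stored rectangle an "empty" value that can never widen a real update, which is what the driver relies on after a successful flush.
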
@@ -115,6 +115,8 @@ struct mga_fbdev {
         void *sysram;
         int size;
         struct ttm_bo_kmap_obj mapping;
+        int x1, y1, x2, y2; /* dirty rect */
+        spinlock_t dirty_lock;
 };
 
 struct mga_crtc {
......
@@ -29,16 +29,52 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
         int bpp = (mfbdev->mfb.base.bits_per_pixel + 7)/8;
         int ret;
         bool unmap = false;
+        bool store_for_later = false;
+        int x2, y2;
+        unsigned long flags;
 
         obj = mfbdev->mfb.obj;
         bo = gem_to_mga_bo(obj);
 
+        /*
+         * try and reserve the BO, if we fail with busy
+         * then the BO is being moved and we should
+         * store up the damage until later.
+         */
         ret = mgag200_bo_reserve(bo, true);
         if (ret) {
-                DRM_ERROR("failed to reserve fb bo\n");
-                return;
+                if (ret != -EBUSY)
+                        return;
+
+                store_for_later = true;
         }
+
+        x2 = x + width - 1;
+        y2 = y + height - 1;
+        spin_lock_irqsave(&mfbdev->dirty_lock, flags);
+
+        if (mfbdev->y1 < y)
+                y = mfbdev->y1;
+        if (mfbdev->y2 > y2)
+                y2 = mfbdev->y2;
+        if (mfbdev->x1 < x)
+                x = mfbdev->x1;
+        if (mfbdev->x2 > x2)
+                x2 = mfbdev->x2;
+
+        if (store_for_later) {
+                mfbdev->x1 = x;
+                mfbdev->x2 = x2;
+                mfbdev->y1 = y;
+                mfbdev->y2 = y2;
+                spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
+                return;
+        }
+
+        mfbdev->x1 = mfbdev->y1 = INT_MAX;
+        mfbdev->x2 = mfbdev->y2 = 0;
+        spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
+
         if (!bo->kmap.virtual) {
                 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
                 if (ret) {
@@ -48,10 +84,10 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
                 }
                 unmap = true;
         }
-        for (i = y; i < y + height; i++) {
+        for (i = y; i <= y2; i++) {
                 /* assume equal stride for now */
                 src_offset = dst_offset = i * mfbdev->mfb.base.pitches[0] + (x * bpp);
-                memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, width * bpp);
+                memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, (x2 - x + 1) * bpp);
 
         }
         if (unmap)
@@ -252,6 +288,7 @@ int mgag200_fbdev_init(struct mga_device *mdev)
 
         mdev->mfbdev = mfbdev;
         mfbdev->helper.funcs = &mga_fb_helper_funcs;
+        spin_lock_init(&mfbdev->dirty_lock);
 
         ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
                                  mdev->num_crtc, MGAG200FB_CONN_LIMIT);
......
@@ -315,8 +315,8 @@ int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait)
 
         ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
         if (ret) {
-                if (ret != -ERESTARTSYS)
-                        DRM_ERROR("reserve failed %p\n", bo);
+                if (ret != -ERESTARTSYS && ret != -EBUSY)
+                        DRM_ERROR("reserve failed %p %d\n", bo, ret);
                 return ret;
         }
         return 0;
......