Commit 77dfc28b authored by Christian König, committed by Alex Deucher

drm/ttm: wait for BO idle in ttm_bo_move_memcpy

When we want to pipeline accelerated moves we need to wait in the fallback path.
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Parent 88932a7b
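
The patch threads the caller's interruptible flag into ttm_bo_move_memcpy() so the memcpy fallback can wait for any pending (possibly pipelined) move fences before touching the buffer. As a rough, hypothetical illustration of the updated calling convention (the mydrv_* names below are placeholders and not part of this patch; only ttm_bo_move_memcpy() is real TTM API), a driver's move callback would look roughly like this:

/* Hypothetical driver sketch, not taken from the patch. */
static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
			 bool interruptible, bool no_wait_gpu,
			 struct ttm_mem_reg *new_mem)
{
	int r;

	/* Try the accelerated (possibly pipelined) copy first. */
	r = mydrv_copy(bo, evict, no_wait_gpu, new_mem);
	if (!r)
		return 0;

	/*
	 * Software fallback: after this patch ttm_bo_move_memcpy() waits
	 * for the BO to be idle itself, honouring interruptible and
	 * no_wait_gpu.
	 */
	return ttm_bo_move_memcpy(bo, evict, interruptible,
				  no_wait_gpu, new_mem);
}
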
@@ -433,7 +433,8 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
 	if (r) {
 memcpy:
-		r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+		r = ttm_bo_move_memcpy(bo, evict, interruptible,
+				       no_wait_gpu, new_mem);
 		if (r) {
 			return r;
 		}
@@ -1328,7 +1328,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 	/* Fallback to software copy. */
 	ret = ttm_bo_wait(bo, intr, no_wait_gpu);
 	if (ret == 0)
-		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+		ret = ttm_bo_move_memcpy(bo, evict, intr, no_wait_gpu, new_mem);
 out:
 	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
@@ -361,7 +361,8 @@ static int qxl_bo_move(struct ttm_buffer_object *bo,
 		qxl_move_null(bo, new_mem);
 		return 0;
 	}
-	return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+	return ttm_bo_move_memcpy(bo, evict, interruptible,
+				  no_wait_gpu, new_mem);
 }
 static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
@@ -445,7 +445,8 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
 	if (r) {
 memcpy:
-		r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+		r = ttm_bo_move_memcpy(bo, evict, interruptible,
+				       no_wait_gpu, new_mem);
 		if (r) {
 			return r;
 		}
@@ -359,7 +359,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 		ret = bdev->driver->move(bo, evict, interruptible,
 					 no_wait_gpu, mem);
 	else
-		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);
+		ret = ttm_bo_move_memcpy(bo, evict, interruptible,
+					 no_wait_gpu, mem);
 	if (ret) {
 		if (bdev->driver->move_notify) {
@@ -320,7 +320,8 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
 }
 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-		       bool evict, bool no_wait_gpu,
+		       bool evict, bool interruptible,
+		       bool no_wait_gpu,
 		       struct ttm_mem_reg *new_mem)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
@@ -336,6 +337,10 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 	unsigned long add = 0;
 	int dir;
+	ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+	if (ret)
+		return ret;
 	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
 	if (ret)
 		return ret;
@@ -970,6 +970,7 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @evict: 1: This is an eviction. Don't try to pipeline.
+ * @interruptible: Sleep interruptible if waiting.
  * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
  *
@@ -984,7 +985,8 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
  */
 extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-			      bool evict, bool no_wait_gpu,
+			      bool evict, bool interruptible,
+			      bool no_wait_gpu,
 			      struct ttm_mem_reg *new_mem);
 /**