Commit 74561cd4 authored by Christian König, committed by Alex Deucher

drm/ttm: remove no_wait_gpu param from ttm_bo_move_accel_cleanup

It isn't used, and not waiting for the GPU after scheduling a move is
actually quite dangerous.
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Parent 99c44632
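The hunks below all make the same mechanical change: the no_wait_gpu argument is dropped from every ttm_bo_move_accel_cleanup() call site and from the function's definition, kerneldoc, and prototype. As a reading aid, here is a minimal sketch (not taken from any of the patched drivers) of what a blit-based move path looks like with the new four-argument signature; my_driver_move_blit() and my_driver_copy() are hypothetical names, and the types are the kernel-internal ones of this era (struct fence, struct ttm_mem_reg).

/*
 * Illustrative sketch only, not code from this patch: how a driver's
 * blit-based move path calls ttm_bo_move_accel_cleanup() after this
 * commit. my_driver_copy() is a hypothetical stand-in for the driver's
 * copy-engine submission.
 */
static int my_driver_move_blit(struct ttm_buffer_object *bo, bool evict,
			       struct ttm_mem_reg *new_mem)
{
	struct fence *fence;
	int r;

	/* Schedule the copy on the GPU and get back a fence for it. */
	r = my_driver_copy(bo, new_mem, &fence);
	if (r)
		return r;

	/*
	 * New four-argument form: the no_wait_gpu flag is gone, so the
	 * caller can no longer ask TTM to skip waiting on the scheduled
	 * move.
	 */
	r = ttm_bo_move_accel_cleanup(bo, fence, evict, new_mem);
	fence_put(fence);
	return r;
}

The radeon and nouveau call sites below follow the same pattern, passing &fence->base because those drivers wrap the fence in a driver-specific structure.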
@@ -287,8 +287,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 			       new_mem->num_pages * PAGE_SIZE, /* bytes */
 			       bo->resv, &fence);
 	/* FIXME: handle copy error */
-	r = ttm_bo_move_accel_cleanup(bo, fence,
-				      evict, no_wait_gpu, new_mem);
+	r = ttm_bo_move_accel_cleanup(bo, fence, evict, new_mem);
 	fence_put(fence);
 	return r;
 }

@@ -1082,7 +1082,6 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 				ret = ttm_bo_move_accel_cleanup(bo,
 								&fence->base,
 								evict,
-								no_wait_gpu,
 								new_mem);
 				nouveau_fence_unref(&fence);
 			}

@@ -300,8 +300,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 	if (IS_ERR(fence))
 		return PTR_ERR(fence);
 
-	r = ttm_bo_move_accel_cleanup(bo, &fence->base,
-				      evict, no_wait_gpu, new_mem);
+	r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, new_mem);
 	radeon_fence_unref(&fence);
 	return r;
 }

@@ -637,7 +637,6 @@ EXPORT_SYMBOL(ttm_bo_kunmap);
 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 			      struct fence *fence,
 			      bool evict,
-			      bool no_wait_gpu,
 			      struct ttm_mem_reg *new_mem)
 {
 	struct ttm_bo_device *bdev = bo->bdev;

@@ -1004,7 +1004,6 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
  * @bo: A pointer to a struct ttm_buffer_object.
  * @fence: A fence object that signals when moving is complete.
  * @evict: This is an evict move. Don't return until the buffer is idle.
- * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
  *
  * Accelerated move function to be called when an accelerated move
@@ -1016,8 +1015,7 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
  */
 extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
-				     struct fence *fence,
-				     bool evict, bool no_wait_gpu,
+				     struct fence *fence, bool evict,
 				     struct ttm_mem_reg *new_mem);
 
 /**
  * ttm_io_prot