Commit fc9c8f54 — Author: Christian König; Committer: Alex Deucher

drm/amdgpu: add vm_needs_flush parameter to amdgpu_copy_buffer

This allows us to flush the system VM here.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Parent: df264f9e
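For reference, the prototype after this change (as it appears in the amdgpu_ttm.h hunk below) gains one trailing bool; every existing call site is updated to pass false so its behavior stays the same:

int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
		       uint64_t dst_offset, uint32_t byte_count,
		       struct reservation_object *resv,
		       struct dma_fence **fence, bool direct_submit,
		       bool vm_needs_flush);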
@@ -40,7 +40,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
 	for (i = 0; i < n; i++) {
 		struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 		r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence,
-				       false);
+				       false, false);
 		if (r)
 			goto exit_do_move;
 		r = dma_fence_wait(fence, false);
......
@@ -535,7 +535,7 @@ int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
 	r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
			       amdgpu_bo_size(bo), resv, fence,
-			       direct);
+			       direct, false);
 	if (!r)
 		amdgpu_bo_fence(bo, *fence, true);
@@ -588,7 +588,7 @@ int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
 	r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
			       amdgpu_bo_size(bo), resv, fence,
-			       direct);
+			       direct, false);
 	if (!r)
 		amdgpu_bo_fence(bo, *fence, true);
......
@@ -111,7 +111,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 		amdgpu_bo_kunmap(gtt_obj[i]);
 		r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
-				       size, NULL, &fence, false);
+				       size, NULL, &fence, false, false);
 		if (r) {
 			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
@@ -156,7 +156,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 		amdgpu_bo_kunmap(vram_obj);
 		r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr,
-				       size, NULL, &fence, false);
+				       size, NULL, &fence, false, false);
 		if (r) {
 			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
......
@@ -318,7 +318,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 		r = amdgpu_copy_buffer(ring, old_start, new_start,
				       cur_pages * PAGE_SIZE,
-				       bo->resv, &next, false);
+				       bo->resv, &next, false, false);
 		if (r)
 			goto error;
@@ -1256,12 +1256,11 @@ int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
 	return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
 }
-int amdgpu_copy_buffer(struct amdgpu_ring *ring,
-		       uint64_t src_offset,
-		       uint64_t dst_offset,
-		       uint32_t byte_count,
+int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
+		       uint64_t dst_offset, uint32_t byte_count,
 		       struct reservation_object *resv,
-		       struct dma_fence **fence, bool direct_submit)
+		       struct dma_fence **fence, bool direct_submit,
+		       bool vm_needs_flush)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_job *job;
@@ -1283,6 +1282,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
 	if (r)
 		return r;
+	job->vm_needs_flush = vm_needs_flush;
 	if (resv) {
 		r = amdgpu_sync_resv(adev, &job->sync, resv,
				     AMDGPU_FENCE_OWNER_UNDEFINED);
......
@@ -61,12 +61,11 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
			 const struct ttm_place *place,
			 struct ttm_mem_reg *mem);
-int amdgpu_copy_buffer(struct amdgpu_ring *ring,
-		       uint64_t src_offset,
-		       uint64_t dst_offset,
-		       uint32_t byte_count,
+int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
+		       uint64_t dst_offset, uint32_t byte_count,
 		       struct reservation_object *resv,
-		       struct dma_fence **fence, bool direct_submit);
+		       struct dma_fence **fence, bool direct_submit,
+		       bool vm_needs_flush);
 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
		       uint32_t src_data,
		       struct reservation_object *resv,
......
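Below is a minimal usage sketch, not part of this patch; the wrapper name and parameter names are illustrative assumptions. It shows a caller asking amdgpu_copy_buffer() to flush the VM before the copy job runs by passing true as the new last argument, while leaving direct_submit false so the job still goes through the scheduler.

/* Hypothetical helper (not from this commit): copy a buffer and request a
 * VM flush for the job.  Only the final 'true' differs from the call sites
 * updated above. */
static int example_copy_with_vm_flush(struct amdgpu_ring *ring,
				      uint64_t src, uint64_t dst, uint32_t bytes,
				      struct reservation_object *resv,
				      struct dma_fence **fence)
{
	return amdgpu_copy_buffer(ring, src, dst, bytes, resv, fence,
				  false /* direct_submit */,
				  true  /* vm_needs_flush */);
}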