Commit 0e3f154a authored by Chunming Zhou, committed by Alex Deucher

drm/amdgpu: change uvd ib test to use kernel fence directly

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Jammy Zhou <jammy.zhou@amd.com>
Parent bb1e38a4
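
This commit switches the UVD IB tests and the UVD create/destroy message helpers from the driver-private struct amdgpu_fence, released via amdgpu_fence_wait()/amdgpu_fence_unref(), to the kernel's generic struct fence, waited on and released with fence_wait()/fence_put(). A minimal sketch of the resulting call pattern, condensed from the uvd_v*_0_ring_test_ib() hunks below (clock setup and error reporting omitted; the function name is illustrative, not the driver's):

    /* Sketch only: condensed from the ring IB tests changed in this commit. */
    static int uvd_ring_test_ib_sketch(struct amdgpu_ring *ring)
    {
            struct fence *fence = NULL;     /* generic kernel fence, no amdgpu wrapper */
            int r;

            r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
            if (r)
                    goto error;

            /* the destroy helper now takes a struct fence ** out parameter */
            r = amdgpu_uvd_get_destroy_msg(ring, 1, &fence);
            if (r)
                    goto error;

            /* wait on and drop the fence through the common fence API */
            r = fence_wait(fence, false);

    error:
            fence_put(fence);               /* fence_put(NULL) is a no-op */
            return r;
    }

Callers that still need the driver wrapper, such as amdgpu_test_create_and_emit_fence(), convert back with to_amdgpu_fence(), as the first hunk shows.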
@@ -244,17 +244,19 @@ static int amdgpu_test_create_and_emit_fence(struct amdgpu_device *adev,
         int r;
         if (ring == &adev->uvd.ring) {
+                struct fence *f = NULL;
                 r = amdgpu_uvd_get_create_msg(ring, handle, NULL);
                 if (r) {
                         DRM_ERROR("Failed to get dummy create msg\n");
                         return r;
                 }
-                r = amdgpu_uvd_get_destroy_msg(ring, handle, fence);
+                r = amdgpu_uvd_get_destroy_msg(ring, handle, &f);
                 if (r) {
                         DRM_ERROR("Failed to get dummy destroy msg\n");
                         return r;
                 }
+                *fence = to_amdgpu_fence(f);
         } else if (ring == &adev->vce.ring[0] ||
                    ring == &adev->vce.ring[1]) {
...
@@ -288,7 +288,7 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
         for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
                 uint32_t handle = atomic_read(&adev->uvd.handles[i]);
                 if (handle != 0 && adev->uvd.filp[i] == filp) {
-                        struct amdgpu_fence *fence;
+                        struct fence *fence;
                         amdgpu_uvd_note_usage(adev);
@@ -298,8 +298,8 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
                                 continue;
                         }
-                        amdgpu_fence_wait(fence, false);
-                        amdgpu_fence_unref(&fence);
+                        fence_wait(fence, false);
+                        fence_put(fence);
                         adev->uvd.filp[i] = NULL;
                         atomic_set(&adev->uvd.handles[i], 0);
@@ -819,7 +819,7 @@ static int amdgpu_uvd_free_job(
 static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
                                struct amdgpu_bo *bo,
-                               struct amdgpu_fence **fence)
+                               struct fence **fence)
 {
         struct ttm_validate_buffer tv;
         struct ww_acquire_ctx ticket;
@@ -876,7 +876,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
         ttm_eu_fence_buffer_objects(&ticket, &head, &ib->fence->base);
         if (fence)
-                *fence = amdgpu_fence_ref(ib->fence);
+                *fence = fence_get(&ib->fence->base);
         amdgpu_bo_unref(&bo);
         if (amdgpu_enable_scheduler)
@@ -898,7 +898,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
    crash the vcpu so just try to emmit a dummy create/destroy msg to
    avoid this */
 int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-                              struct amdgpu_fence **fence)
+                              struct fence **fence)
 {
         struct amdgpu_device *adev = ring->adev;
         struct amdgpu_bo *bo;
@@ -945,7 +945,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 }
 int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-                               struct amdgpu_fence **fence)
+                               struct fence **fence)
 {
         struct amdgpu_device *adev = ring->adev;
         struct amdgpu_bo *bo;
...
@@ -29,9 +29,9 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev);
 int amdgpu_uvd_suspend(struct amdgpu_device *adev);
 int amdgpu_uvd_resume(struct amdgpu_device *adev);
 int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-                              struct amdgpu_fence **fence);
+                              struct fence **fence);
 int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-                               struct amdgpu_fence **fence);
+                               struct fence **fence);
 void amdgpu_uvd_free_handles(struct amdgpu_device *adev,
                              struct drm_file *filp);
 int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx);
...
@@ -534,7 +534,7 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
 static int uvd_v4_2_ring_test_ib(struct amdgpu_ring *ring)
 {
         struct amdgpu_device *adev = ring->adev;
-        struct amdgpu_fence *fence = NULL;
+        struct fence *fence = NULL;
         int r;
         r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
@@ -555,14 +555,14 @@ static int uvd_v4_2_ring_test_ib(struct amdgpu_ring *ring)
                 goto error;
         }
-        r = amdgpu_fence_wait(fence, false);
+        r = fence_wait(fence, false);
         if (r) {
                 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
                 goto error;
         }
         DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
 error:
-        amdgpu_fence_unref(&fence);
+        fence_put(fence);
         amdgpu_asic_set_uvd_clocks(adev, 0, 0);
         return r;
 }
...
@@ -580,7 +580,7 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
 static int uvd_v5_0_ring_test_ib(struct amdgpu_ring *ring)
 {
         struct amdgpu_device *adev = ring->adev;
-        struct amdgpu_fence *fence = NULL;
+        struct fence *fence = NULL;
         int r;
         r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
@@ -601,14 +601,14 @@ static int uvd_v5_0_ring_test_ib(struct amdgpu_ring *ring)
                 goto error;
         }
-        r = amdgpu_fence_wait(fence, false);
+        r = fence_wait(fence, false);
         if (r) {
                 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
                 goto error;
         }
         DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
 error:
-        amdgpu_fence_unref(&fence);
+        fence_put(fence);
         amdgpu_asic_set_uvd_clocks(adev, 0, 0);
         return r;
 }
...
@@ -575,7 +575,7 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
  */
 static int uvd_v6_0_ring_test_ib(struct amdgpu_ring *ring)
 {
-        struct amdgpu_fence *fence = NULL;
+        struct fence *fence = NULL;
         int r;
         r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
@@ -590,14 +590,14 @@ static int uvd_v6_0_ring_test_ib(struct amdgpu_ring *ring)
                 goto error;
         }
-        r = amdgpu_fence_wait(fence, false);
+        r = fence_wait(fence, false);
         if (r) {
                 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
                 goto error;
         }
         DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
 error:
-        amdgpu_fence_unref(&fence);
+        fence_put(fence);
         return r;
 }
...