Commit 336d1f5e authored by Christian König, committed by Alex Deucher

drm/amdgpu: remove HW fence owner

Not used any more since we now always use the scheduler.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Parent bcc634f4
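At a glance, the API change this patch makes (assembled from the header hunks below) is:

    /* before */
    int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
                          struct amdgpu_fence **fence);
    int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                           struct amdgpu_ib *ib, void *owner,
                           struct fence *last_vm_update,
                           struct fence **f);

    /* after */
    int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence **fence);
    int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                           struct amdgpu_ib *ibs, struct fence *last_vm_update,
                           struct fence **f);

Ownership is tracked only on the scheduler fence from here on (see the amdgpu_sync_get_owner() hunk below), which is sufficient because all submissions now go through the scheduler.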
@@ -409,9 +409,6 @@ struct amdgpu_fence {
 	struct amdgpu_ring	*ring;
 	uint64_t		seq;
 
-	/* filp or special value for fence creator */
-	void			*owner;
-
 	wait_queue_t		fence_wake;
 };
 
@@ -432,8 +429,7 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 				   unsigned irq_type);
 void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
 void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
-int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
-		      struct amdgpu_fence **fence);
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence **fence);
 void amdgpu_fence_process(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
@@ -1177,8 +1173,7 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		  unsigned size, struct amdgpu_ib *ib);
 void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib);
 int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
-		       struct amdgpu_ib *ib, void *owner,
-		       struct fence *last_vm_update,
+		       struct amdgpu_ib *ibs, struct fence *last_vm_update,
 		       struct fence **f);
 int amdgpu_ib_pool_init(struct amdgpu_device *adev);
 void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
...
@@ -91,25 +91,21 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
  * amdgpu_fence_emit - emit a fence on the requested ring
  *
  * @ring: ring the fence is associated with
- * @owner: creator of the fence
  * @fence: amdgpu fence object
  *
  * Emits a fence command on the requested ring (all asics).
  * Returns 0 on success, -ENOMEM on failure.
  */
-int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
-		      struct amdgpu_fence **fence)
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
 
-	/* we are protected by the ring emission mutex */
 	*fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
 	if ((*fence) == NULL) {
 		return -ENOMEM;
 	}
 	(*fence)->seq = ++ring->fence_drv.sync_seq;
 	(*fence)->ring = ring;
-	(*fence)->owner = owner;
 	fence_init(&(*fence)->base, &amdgpu_fence_ops,
 		   &ring->fence_drv.fence_queue.lock,
 		   adev->fence_context + ring->idx,
...
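With the owner argument gone, emitting a hardware fence reduces to the two-argument form. A minimal sketch of the new calling pattern, mirroring the amdgpu_ib_schedule() hunk further down (error handling abbreviated):

    struct amdgpu_fence *fence;
    int r;

    r = amdgpu_fence_emit(ring, &fence);  /* allocates from amdgpu_fence_slab */
    if (r)
        return r;                         /* -ENOMEM if the allocation failed */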
@@ -101,7 +101,6 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib)
  * @adev: amdgpu_device pointer
  * @num_ibs: number of IBs to schedule
  * @ibs: IB objects to schedule
- * @owner: owner for creating the fences
  * @f: fence created during this submission
  *
  * Schedule an IB on the associated ring (all asics).
@@ -118,8 +117,7 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib)
  * to SI there was just a DE IB.
  */
 int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
-		       struct amdgpu_ib *ibs, void *owner,
-		       struct fence *last_vm_update,
+		       struct amdgpu_ib *ibs, struct fence *last_vm_update,
 		       struct fence **f)
 {
 	struct amdgpu_device *adev = ring->adev;
@@ -183,7 +181,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		amdgpu_ring_emit_hdp_invalidate(ring);
 	}
 
-	r = amdgpu_fence_emit(ring, owner, &ib->fence);
+	r = amdgpu_fence_emit(ring, &ib->fence);
 	if (r) {
 		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
 		ring->current_ctx = old_ctx;
...
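Every caller below collapses to the same shape. The direct-submission pattern, as it appears in the UVD/VCE and ring-test hunks that follow (NULL for last_vm_update because these paths have no pending VM update to order against):

    struct fence *f = NULL;

    r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
    if (r)
        goto err;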
@@ -148,7 +148,7 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
 	}
 
 	trace_amdgpu_sched_run_job(job);
-	r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job->owner,
+	r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
 			       job->sync.last_vm_update, &fence);
 	if (r) {
 		DRM_ERROR("Error scheduling IBs (%d)\n", r);
...
@@ -60,12 +60,8 @@ void amdgpu_sync_create(struct amdgpu_sync *sync)
  */
 static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
 {
-	struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
 	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
 
-	if (a_fence)
-		return a_fence->ring->adev == adev;
-
 	if (s_fence) {
 		struct amdgpu_ring *ring;
 
@@ -85,13 +81,11 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
  */
 static void *amdgpu_sync_get_owner(struct fence *f)
 {
-	struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
 	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
 
 	if (s_fence)
 		return s_fence->owner;
-	else if (a_fence)
-		return a_fence->owner;
+
 	return AMDGPU_FENCE_OWNER_UNDEFINED;
 }
 
...
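Reassembled from the hunk above, the whole owner lookup now reads:

    static void *amdgpu_sync_get_owner(struct fence *f)
    {
            struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

            if (s_fence)
                    return s_fence->owner;

            return AMDGPU_FENCE_OWNER_UNDEFINED;
    }

Any fence that did not come from the scheduler is treated as having an undefined owner, which is safe now that every real submission produces a scheduler fence.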
@@ -886,8 +886,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	ib->length_dw = 16;
 
 	if (direct) {
-		r = amdgpu_ib_schedule(ring, 1, ib,
-				       AMDGPU_FENCE_OWNER_UNDEFINED, NULL, &f);
+		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
 		if (r)
 			goto err_free;
 
...
@@ -425,8 +425,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	r = amdgpu_ib_schedule(ring, 1, ib, AMDGPU_FENCE_OWNER_UNDEFINED,
-			       NULL, &f);
+	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
 	if (r)
 		goto err;
 
@@ -487,9 +486,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 		ib->ptr[i] = 0x0;
 
 	if (direct) {
-		r = amdgpu_ib_schedule(ring, 1, ib,
-				       AMDGPU_FENCE_OWNER_UNDEFINED,
-				       NULL, &f);
+		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
 		if (r)
 			goto err;
...
@@ -643,8 +643,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
 	ib.ptr[3] = 1;
 	ib.ptr[4] = 0xDEADBEEF;
 	ib.length_dw = 5;
-	r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
-			       NULL, &f);
+	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 	if (r)
 		goto err1;
 
...
@@ -2136,8 +2136,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
 	ib.ptr[2] = 0xDEADBEEF;
 	ib.length_dw = 3;
 
-	r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
-			       NULL, &f);
+	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 	if (r)
 		goto err2;
 
...
@@ -706,8 +706,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
 	ib.ptr[2] = 0xDEADBEEF;
 	ib.length_dw = 3;
 
-	r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
-			       NULL, &f);
+	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 	if (r)
 		goto err2;
 
@@ -1262,8 +1261,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
 
 	/* shedule the ib on the ring */
-	r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
-			       NULL, &f);
+	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 	if (r) {
 		DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
 		goto fail;
...
@@ -701,8 +701,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
 	ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
 	ib.length_dw = 8;
 
-	r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
-			       NULL, &f);
+	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 	if (r)
 		goto err1;
 
...
@@ -853,8 +853,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
 	ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
 	ib.length_dw = 8;
 
-	r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
-			       NULL, &f);
+	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 	if (r)
 		goto err1;
 
...