diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index fda7792d9e08d1c6fd88eff0fb922210e2cc2c90..468f884271b37093926098e7cd35900b5ce440fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -907,8 +907,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	if (amdgpu_enable_scheduler && parser->num_ibs) {
 		struct amdgpu_ring * ring =
 			amdgpu_cs_parser_get_ring(adev, parser);
-		parser->ibs[parser->num_ibs - 1].sequence = atomic64_inc_return(
-			&parser->ctx->rings[ring->idx].c_entity.last_queued_v_seq);
 		if (ring->is_pte_ring || (parser->bo_list && parser->bo_list->has_userptr)) {
 			r = amdgpu_cs_parser_prepare_job(parser);
 			if (r)
@@ -918,7 +916,8 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		parser->ring = ring;
 		parser->run_job = amdgpu_cs_parser_run_job;
 		parser->free_job = amdgpu_cs_parser_free_job;
-		amd_sched_push_job(ring->scheduler,
+		parser->ibs[parser->num_ibs - 1].sequence =
+			amd_sched_push_job(ring->scheduler,
 				   &parser->ctx->rings[ring->idx].c_entity,
 				   parser);
 		cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index 788dd348a650f87c1bd065989215465c612faa17..8c01c51aac4127362942b1c7426a9f74601094c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -109,7 +109,6 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
 {
 	int r = 0;
 	if (amdgpu_enable_scheduler) {
-		uint64_t v_seq;
 		struct amdgpu_cs_parser *sched_job =
 			amdgpu_cs_parser_create(adev,
 						owner,
@@ -119,16 +118,12 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
 			return -ENOMEM;
 		}
 		sched_job->free_job = free_job;
-		v_seq = atomic64_inc_return(&adev->kernel_ctx->rings[ring->idx].c_entity.last_queued_v_seq);
-		ibs[num_ibs - 1].sequence = v_seq;
-		amd_sched_push_job(ring->scheduler,
+		ibs[num_ibs - 1].sequence = amd_sched_push_job(ring->scheduler,
 				   &adev->kernel_ctx->rings[ring->idx].c_entity,
 				   sched_job);
 		r = amd_sched_wait_emit(
 			&adev->kernel_ctx->rings[ring->idx].c_entity,
-			v_seq,
-			false,
-			-1);
+			ibs[num_ibs - 1].sequence, false, -1);
 		if (r)
 			WARN(true, "emit timeout\n");
 	} else
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index d90254f5ca6a0dff79e016c661eb45cf63fd396a..ab9c65a245bacb851843c3a652657895b4f2f597 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -371,7 +371,6 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 
 	if (amdgpu_enable_scheduler) {
 		int r;
-		uint64_t v_seq;
 		sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
 						    adev->kernel_ctx, ib, 1);
 		if(!sched_job)
@@ -379,15 +378,11 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 		sched_job->job_param.vm.bo = bo;
 		sched_job->run_job = amdgpu_vm_run_job;
 		sched_job->free_job = amdgpu_vm_free_job;
-		v_seq = atomic64_inc_return(&adev->kernel_ctx->rings[ring->idx].c_entity.last_queued_v_seq);
-		ib->sequence = v_seq;
-		amd_sched_push_job(ring->scheduler,
+		ib->sequence = amd_sched_push_job(ring->scheduler,
 				   &adev->kernel_ctx->rings[ring->idx].c_entity,
 				   sched_job);
 		r = amd_sched_wait_emit(&adev->kernel_ctx->rings[ring->idx].c_entity,
-					v_seq,
-					false,
-					-1);
+					ib->sequence, false, -1);
 		if (r)
 			DRM_ERROR("emit timeout\n");
 
@@ -521,7 +516,6 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 
 	if (amdgpu_enable_scheduler) {
 		int r;
-		uint64_t v_seq;
 		sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
 						    adev->kernel_ctx,
 						    ib, 1);
@@ -530,15 +524,11 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 		sched_job->job_param.vm.bo = pd;
 		sched_job->run_job = amdgpu_vm_run_job;
 		sched_job->free_job = amdgpu_vm_free_job;
-		v_seq = atomic64_inc_return(&adev->kernel_ctx->rings[ring->idx].c_entity.last_queued_v_seq);
-		ib->sequence = v_seq;
-		amd_sched_push_job(ring->scheduler,
+		ib->sequence = amd_sched_push_job(ring->scheduler,
 				   &adev->kernel_ctx->rings[ring->idx].c_entity,
 				   sched_job);
 		r = amd_sched_wait_emit(&adev->kernel_ctx->rings[ring->idx].c_entity,
-					v_seq,
-					false,
-					-1);
+					ib->sequence, false, -1);
 		if (r)
 			DRM_ERROR("emit timeout\n");
 	} else {
@@ -872,7 +862,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 
 	if (amdgpu_enable_scheduler) {
 		int r;
-		uint64_t v_seq;
 		sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
 						    adev->kernel_ctx, ib, 1);
 		if(!sched_job)
@@ -883,15 +872,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 		sched_job->job_param.vm_mapping.fence = fence;
 		sched_job->run_job = amdgpu_vm_bo_update_mapping_run_job;
 		sched_job->free_job = amdgpu_vm_free_job;
-		v_seq = atomic64_inc_return(&adev->kernel_ctx->rings[ring->idx].c_entity.last_queued_v_seq);
-		ib->sequence = v_seq;
-		amd_sched_push_job(ring->scheduler,
+		ib->sequence = amd_sched_push_job(ring->scheduler,
 				   &adev->kernel_ctx->rings[ring->idx].c_entity,
 				   sched_job);
 		r = amd_sched_wait_emit(&adev->kernel_ctx->rings[ring->idx].c_entity,
-					v_seq,
-					false,
-					-1);
+					ib->sequence, false, -1);
 		if (r)
 			DRM_ERROR("emit timeout\n");
 	} else {
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 89799eb8608360ccd2b3a32fb8bf519501984dd5..2c4c261ff928a5df15ce7290626529723e58e0cd 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -293,12 +293,9 @@ int amd_context_entity_fini(struct amd_gpu_scheduler *sched,
 * @sched	The pointer to the scheduler
 * @c_entity	The pointer to amd_context_entity
 * @job		The pointer to job required to submit
-* return 0 if succeed. -1 if failed.
-*        -2 indicate queue is full for this client, client should wait untill
-*        scheduler consum some queued command.
-*        -1 other fail.
+* return the virtual sequence number
*/
-int amd_sched_push_job(struct amd_gpu_scheduler *sched,
+uint64_t amd_sched_push_job(struct amd_gpu_scheduler *sched,
 		       struct amd_context_entity *c_entity,
 		       void *job)
 {
@@ -312,7 +309,8 @@ int amd_sched_push_job(struct amd_gpu_scheduler *sched,
 	}
 
 	wake_up_interruptible(&sched->wait_queue);
-	return 0;
+
+	return atomic64_inc_return(&c_entity->last_queued_v_seq);
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 9ab3adc1fa3213eb5b48aa89878a9a082b897cf2..37dd6370bd9817446472358356c5624d848dcfdb 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -129,7 +129,7 @@ struct amd_gpu_scheduler *amd_sched_create(void *device,
 
 int amd_sched_destroy(struct amd_gpu_scheduler *sched);
 
-int amd_sched_push_job(struct amd_gpu_scheduler *sched,
+uint64_t amd_sched_push_job(struct amd_gpu_scheduler *sched,
 		       struct amd_context_entity *c_entity,
 		       void *job);
 
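---

Illustrative note (not part of the patch): the change moves the
last_queued_v_seq increment out of the four call sites and into
amd_sched_push_job() itself, which now returns the new virtual sequence
number instead of a status code; callers store the return value (e.g. in
ib->sequence) and pass that same value to amd_sched_wait_emit(). Below is
a minimal userspace sketch of this calling convention. It assumes nothing
beyond the patch: mock_entity and mock_sched_push_job are hypothetical
stand-ins for amd_context_entity and amd_sched_push_job, and a C11
atomic_fetch_add(..., 1) + 1 plays the role of the kernel's
atomic64_inc_return().

#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for amd_context_entity: only the sequence counter matters here. */
struct mock_entity {
	atomic_uint_fast64_t last_queued_v_seq;
};

/*
 * Mirrors the new contract of amd_sched_push_job(): the scheduler, not the
 * caller, bumps last_queued_v_seq, and the new value is handed back so the
 * caller can record it and later wait on it.
 */
static uint64_t mock_sched_push_job(struct mock_entity *e, void *job)
{
	(void)job; /* a real scheduler would queue the job before this */
	return atomic_fetch_add(&e->last_queued_v_seq, 1) + 1;
}

int main(void)
{
	struct mock_entity entity;
	uint64_t seq;

	atomic_init(&entity.last_queued_v_seq, 0);

	/* Caller pattern after this patch: one call yields the sequence, */
	seq = mock_sched_push_job(&entity, NULL);
	printf("queued with virtual seq %" PRIu64 "\n", seq);

	/* ...which amd_sched_wait_emit(entity, seq, false, -1) would consume. */
	return 0;
}

Centralizing the increment keeps sequence allocation and job queuing at a
single point in the scheduler, so callers can no longer race or duplicate
the atomic64_inc_return() boilerplate that each site previously carried.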