Commit ea199cc9 authored by Jammy Zhou, committed by Alex Deucher

drm/amdgpu: return new seq_no for amd_sched_push_job

It is cleaner to update last_queued_v_seq in the scheduler module.

Signed-off-by: Jammy Zhou <Jammy.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Parent dd01d071
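
The effect of the change, summarized: callers no longer open-code the atomic64_inc_return() on the entity's last_queued_v_seq. Instead, amd_sched_push_job() performs the increment itself and returns the new virtual sequence number, which the caller records in ib->sequence and passes to amd_sched_wait_emit(). Below is a minimal before/after sketch of the caller pattern, adapted from the hunks that follow; declarations and error handling are elided, so this is an illustrative fragment rather than a complete function.

        /* Before: the caller allocated the virtual sequence number itself. */
        v_seq = atomic64_inc_return(
                &adev->kernel_ctx->rings[ring->idx].c_entity.last_queued_v_seq);
        ib->sequence = v_seq;
        amd_sched_push_job(ring->scheduler,
                           &adev->kernel_ctx->rings[ring->idx].c_entity,
                           sched_job);
        r = amd_sched_wait_emit(&adev->kernel_ctx->rings[ring->idx].c_entity,
                                v_seq, false, -1);

        /* After: the scheduler allocates the sequence number and returns it. */
        ib->sequence = amd_sched_push_job(ring->scheduler,
                                          &adev->kernel_ctx->rings[ring->idx].c_entity,
                                          sched_job);
        r = amd_sched_wait_emit(&adev->kernel_ctx->rings[ring->idx].c_entity,
                                ib->sequence, false, -1);

Keeping the increment next to the enqueue inside the scheduler removes the duplicated sequence bookkeeping from amdgpu_cs_ioctl(), amdgpu_sched_ib_submit_kernel_helper() and the amdgpu_vm helpers, as the hunks below show.
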
@@ -907,8 +907,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
         if (amdgpu_enable_scheduler && parser->num_ibs) {
                 struct amdgpu_ring * ring =
                         amdgpu_cs_parser_get_ring(adev, parser);
-                parser->ibs[parser->num_ibs - 1].sequence = atomic64_inc_return(
-                        &parser->ctx->rings[ring->idx].c_entity.last_queued_v_seq);
                 if (ring->is_pte_ring || (parser->bo_list && parser->bo_list->has_userptr)) {
                         r = amdgpu_cs_parser_prepare_job(parser);
                         if (r)
@@ -918,7 +916,8 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                 parser->ring = ring;
                 parser->run_job = amdgpu_cs_parser_run_job;
                 parser->free_job = amdgpu_cs_parser_free_job;
-                amd_sched_push_job(ring->scheduler,
+                parser->ibs[parser->num_ibs - 1].sequence =
+                        amd_sched_push_job(ring->scheduler,
                                    &parser->ctx->rings[ring->idx].c_entity,
                                    parser);
                 cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
...

@@ -109,7 +109,6 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
 {
         int r = 0;
         if (amdgpu_enable_scheduler) {
-                uint64_t v_seq;
                 struct amdgpu_cs_parser *sched_job =
                         amdgpu_cs_parser_create(adev,
                                                 owner,
@@ -119,16 +118,12 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
                         return -ENOMEM;
                 }
                 sched_job->free_job = free_job;
-                v_seq = atomic64_inc_return(&adev->kernel_ctx->rings[ring->idx].c_entity.last_queued_v_seq);
-                ibs[num_ibs - 1].sequence = v_seq;
-                amd_sched_push_job(ring->scheduler,
+                ibs[num_ibs - 1].sequence = amd_sched_push_job(ring->scheduler,
                                    &adev->kernel_ctx->rings[ring->idx].c_entity,
                                    sched_job);
                 r = amd_sched_wait_emit(
                         &adev->kernel_ctx->rings[ring->idx].c_entity,
-                        v_seq,
-                        false,
-                        -1);
+                        ibs[num_ibs - 1].sequence, false, -1);
                 if (r)
                         WARN(true, "emit timeout\n");
         } else
...

@@ -371,7 +371,6 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
         if (amdgpu_enable_scheduler) {
                 int r;
-                uint64_t v_seq;
                 sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
                                                     adev->kernel_ctx, ib, 1);
                 if(!sched_job)
@@ -379,15 +378,11 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
                 sched_job->job_param.vm.bo = bo;
                 sched_job->run_job = amdgpu_vm_run_job;
                 sched_job->free_job = amdgpu_vm_free_job;
-                v_seq = atomic64_inc_return(&adev->kernel_ctx->rings[ring->idx].c_entity.last_queued_v_seq);
-                ib->sequence = v_seq;
-                amd_sched_push_job(ring->scheduler,
+                ib->sequence = amd_sched_push_job(ring->scheduler,
                                    &adev->kernel_ctx->rings[ring->idx].c_entity,
                                    sched_job);
                 r = amd_sched_wait_emit(&adev->kernel_ctx->rings[ring->idx].c_entity,
-                                        v_seq,
-                                        false,
-                                        -1);
+                                        ib->sequence, false, -1);
                 if (r)
                         DRM_ERROR("emit timeout\n");
@@ -521,7 +516,6 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
         if (amdgpu_enable_scheduler) {
                 int r;
-                uint64_t v_seq;
                 sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
                                                     adev->kernel_ctx,
                                                     ib, 1);
@@ -530,15 +524,11 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
                 sched_job->job_param.vm.bo = pd;
                 sched_job->run_job = amdgpu_vm_run_job;
                 sched_job->free_job = amdgpu_vm_free_job;
-                v_seq = atomic64_inc_return(&adev->kernel_ctx->rings[ring->idx].c_entity.last_queued_v_seq);
-                ib->sequence = v_seq;
-                amd_sched_push_job(ring->scheduler,
+                ib->sequence = amd_sched_push_job(ring->scheduler,
                                    &adev->kernel_ctx->rings[ring->idx].c_entity,
                                    sched_job);
                 r = amd_sched_wait_emit(&adev->kernel_ctx->rings[ring->idx].c_entity,
-                                        v_seq,
-                                        false,
-                                        -1);
+                                        ib->sequence, false, -1);
                 if (r)
                         DRM_ERROR("emit timeout\n");
         } else {
@@ -872,7 +862,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
         if (amdgpu_enable_scheduler) {
                 int r;
-                uint64_t v_seq;
                 sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
                                                     adev->kernel_ctx, ib, 1);
                 if(!sched_job)
@@ -883,15 +872,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
                 sched_job->job_param.vm_mapping.fence = fence;
                 sched_job->run_job = amdgpu_vm_bo_update_mapping_run_job;
                 sched_job->free_job = amdgpu_vm_free_job;
-                v_seq = atomic64_inc_return(&adev->kernel_ctx->rings[ring->idx].c_entity.last_queued_v_seq);
-                ib->sequence = v_seq;
-                amd_sched_push_job(ring->scheduler,
+                ib->sequence = amd_sched_push_job(ring->scheduler,
                                    &adev->kernel_ctx->rings[ring->idx].c_entity,
                                    sched_job);
                 r = amd_sched_wait_emit(&adev->kernel_ctx->rings[ring->idx].c_entity,
-                                        v_seq,
-                                        false,
-                                        -1);
+                                        ib->sequence, false, -1);
                 if (r)
                         DRM_ERROR("emit timeout\n");
         } else {
...

@@ -293,12 +293,9 @@ int amd_context_entity_fini(struct amd_gpu_scheduler *sched,
  * @sched       The pointer to the scheduler
  * @c_entity    The pointer to amd_context_entity
  * @job         The pointer to job required to submit
- * return 0 if succeed. -1 if failed.
- *        -2 indicate queue is full for this client, client should wait untill
- *        scheduler consum some queued command.
- *        -1 other fail.
+ * return the virtual sequence number
  */
-int amd_sched_push_job(struct amd_gpu_scheduler *sched,
+uint64_t amd_sched_push_job(struct amd_gpu_scheduler *sched,
                        struct amd_context_entity *c_entity,
                        void *job)
 {
@@ -312,7 +309,8 @@ int amd_sched_push_job(struct amd_gpu_scheduler *sched,
         }

         wake_up_interruptible(&sched->wait_queue);
-        return 0;
+
+        return atomic64_inc_return(&c_entity->last_queued_v_seq);
 }

 /**
...

@@ -129,7 +129,7 @@ struct amd_gpu_scheduler *amd_sched_create(void *device,

 int amd_sched_destroy(struct amd_gpu_scheduler *sched);

-int amd_sched_push_job(struct amd_gpu_scheduler *sched,
+uint64_t amd_sched_push_job(struct amd_gpu_scheduler *sched,
                        struct amd_context_entity *c_entity,
                        void *job);
...