Commit 43bce41c, authored by Christian König, committed by Alex Deucher

drm/scheduler: only kill entity if last user is killed v2

Note which task is using the entity and only kill it if the last user of
the entity is killed. This should prevent problems when entities are leaked to
child processes.

v2: add missing kernel doc
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Acked-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Parent 4a102ad4
@@ -275,6 +275,7 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
 long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
 {
        struct drm_gpu_scheduler *sched;
+       struct task_struct *last_user;
        long ret = timeout;
 
        sched = entity->rq->sched;
@@ -295,7 +296,9 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
 
        /* For killed process disable any more IBs enqueue right now */
-       if ((current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
+       last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
+       if ((!last_user || last_user == current->group_leader) &&
+           (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
                drm_sched_entity_set_rq(entity, NULL);
 
        return ret;
@@ -541,6 +544,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
 
        trace_drm_sched_job(sched_job, entity);
+       WRITE_ONCE(entity->last_user, current->group_leader);
        first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
 
        /* first job wakes up scheduler */
......
@@ -66,6 +66,7 @@ enum drm_sched_priority {
  * @guilty: points to ctx's guilty.
  * @fini_status: contains the exit status in case the process was signalled.
  * @last_scheduled: points to the finished fence of the last scheduled job.
+ * @last_user: last group leader pushing a job into the entity.
  *
  * Entities will emit jobs in order to their corresponding hardware
  * ring, and the scheduler will alternate between entities based on
@@ -85,6 +86,7 @@ struct drm_sched_entity {
        struct dma_fence_cb cb;
        atomic_t *guilty;
        struct dma_fence *last_scheduled;
+       struct task_struct *last_user;
 };
 
 /**
......
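For context, the "last user" pattern introduced above can be illustrated with a minimal user-space sketch. It uses C11 atomics in place of the kernel's WRITE_ONCE()/cmpxchg(); the names entity, entity_push_job and entity_flush_should_kill are made up for illustration and are not kernel API, and the PF_EXITING/SIGKILL checks of the real drm_sched_entity_flush() are omitted.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for struct drm_sched_entity; only the field this patch adds. */
struct entity {
        _Atomic(void *) last_user;    /* group leader that last pushed a job */
};

/* Mirrors the WRITE_ONCE() added to drm_sched_entity_push_job(). */
static void entity_push_job(struct entity *e, void *group_leader)
{
        atomic_store(&e->last_user, group_leader);
}

/*
 * Mirrors the cmpxchg() added to drm_sched_entity_flush(): clear last_user
 * only if we are it, and report whether the entity may be torn down.
 */
static bool entity_flush_should_kill(struct entity *e, void *group_leader)
{
        void *expected = group_leader;

        if (atomic_compare_exchange_strong(&e->last_user, &expected, NULL))
                return true;          /* we were the last user */
        return expected == NULL;      /* nobody was using the entity */
}

int main(void)
{
        struct entity e = { NULL };
        int parent, child;            /* dummy "group leader" identities */

        entity_push_job(&e, &parent); /* parent submits a job */
        entity_push_job(&e, &child);  /* leaked entity: child submits too */

        /* Parent exits first: child is still the last user, keep the entity. */
        printf("parent exit -> kill entity: %d\n",
               entity_flush_should_kill(&e, &parent));
        /* Child exits: it was the last user, the entity may now be killed. */
        printf("child exit  -> kill entity: %d\n",
               entity_flush_should_kill(&e, &child));
        return 0;
}

Run as an ordinary C program this prints 0 for the parent's flush (the child is still the last user, so the entity survives) and 1 for the child's, mirroring the (!last_user || last_user == current->group_leader) condition in the patch.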