Commit 0e89d0c1 authored by Christian König, committed by Alex Deucher

drm/amdgpu: stop leaking the ctx id into the scheduler v2

IDs are for the IOCTL ABI only.

v2: remove tgid as well
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Parent efd4ccb5
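In effect, the IDR-allocated handle now stops at the IOCTL boundary: user space gets the id back, but the scheduler only ever sees the entity pointer. A minimal sketch of that flow, condensed from the amdgpu_ctx_alloc hunks below (example_ctx_alloc is a hypothetical wrapper; error paths are trimmed and the usual amdgpu/kernel headers are assumed):

	/* Illustrative only: condenses the allocation path in the diff below. */
	static int example_ctx_alloc(struct amdgpu_device *adev,
				     struct amdgpu_fpriv *fpriv,
				     uint32_t *id)
	{
		struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
		struct amdgpu_ctx *ctx;
		int r;

		ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
		if (!ctx)
			return -ENOMEM;

		mutex_lock(&mgr->lock);
		r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
		if (r < 0) {
			mutex_unlock(&mgr->lock);
			kfree(ctx);
			return r;
		}
		*id = (uint32_t)r;		/* the id is IOCTL ABI, nothing more */
		amdgpu_ctx_init(adev, fpriv, ctx);	/* no id passed down anymore */
		mutex_unlock(&mgr->lock);

		return 0;
	}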
@@ -50,8 +50,7 @@ static void amdgpu_ctx_do_release(struct kref *ref)
 static void amdgpu_ctx_init(struct amdgpu_device *adev,
 			    struct amdgpu_fpriv *fpriv,
-			    struct amdgpu_ctx *ctx,
-			    uint32_t id)
+			    struct amdgpu_ctx *ctx)
 {
 	int i;
 	memset(ctx, 0, sizeof(*ctx));
@@ -81,7 +80,7 @@ int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
 			return r;
 		}
 		*id = (uint32_t)r;
-		amdgpu_ctx_init(adev, fpriv, ctx, *id);
+		amdgpu_ctx_init(adev, fpriv, ctx);
 		mutex_unlock(&mgr->lock);
 	} else {
 		if (adev->kernel_ctx) {
@@ -89,8 +88,7 @@ int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
 			kfree(ctx);
 			return 0;
 		}
-		*id = AMD_KERNEL_CONTEXT_ID;
-		amdgpu_ctx_init(adev, fpriv, ctx, *id);
+		amdgpu_ctx_init(adev, fpriv, ctx);
 		adev->kernel_ctx = ctx;
 	}
@@ -105,8 +103,7 @@ int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
 		rq = &adev->rings[i]->scheduler->kernel_rq;
 		r = amd_context_entity_init(adev->rings[i]->scheduler,
 					    &ctx->rings[i].c_entity,
-					    NULL, rq, *id,
-					    amdgpu_sched_jobs);
+					    NULL, rq, amdgpu_sched_jobs);
 		if (r)
 			break;
 	}
...
@@ -172,7 +172,7 @@ static struct amd_context_entity *select_context(
  * @entity	The pointer to a valid amd_context_entity
  * @parent	The parent entity of this amd_context_entity
  * @rq		The run queue this entity belongs
- * @context_id	The context id for this entity
+ * @kernel	If this is an entity for the kernel
  * @jobs	The max number of jobs in the job queue
  *
  * return 0 if succeed. negative error code on failure
@@ -181,7 +181,6 @@ int amd_context_entity_init(struct amd_gpu_scheduler *sched,
 			    struct amd_context_entity *entity,
 			    struct amd_sched_entity *parent,
 			    struct amd_run_queue *rq,
-			    uint32_t context_id,
 			    uint32_t jobs)
 {
 	uint64_t seq_ring = 0;
@@ -203,9 +202,6 @@ int amd_context_entity_init(struct amd_gpu_scheduler *sched,
 		return -EINVAL;
 
 	spin_lock_init(&entity->queue_lock);
-	entity->tgid = (context_id == AMD_KERNEL_CONTEXT_ID) ?
-				AMD_KERNEL_PROCESS_ID : current->tgid;
-	entity->context_id = context_id;
 	atomic64_set(&entity->last_emitted_v_seq, seq_ring);
 	atomic64_set(&entity->last_queued_v_seq, seq_ring);
@@ -275,9 +271,9 @@ int amd_context_entity_fini(struct amd_gpu_scheduler *sched,
 	if (r) {
 		if (entity->is_pending)
-			DRM_INFO("Entity %u is in waiting state during fini,\
+			DRM_INFO("Entity %p is in waiting state during fini,\
 				all pending ibs will be canceled.\n",
-				 entity->context_id);
+				 entity);
 	}
 	mutex_lock(&rq->lock);
...
@@ -26,9 +26,6 @@
 #include <linux/kfifo.h>
 
-#define AMD_KERNEL_CONTEXT_ID			0
-#define AMD_KERNEL_PROCESS_ID			0
-
 #define AMD_GPU_WAIT_IDLE_TIMEOUT_IN_MS		3000
 
 struct amd_gpu_scheduler;
@@ -74,8 +71,6 @@ struct amd_context_entity {
 	/* the virtual_seq is unique per context per ring */
 	atomic64_t last_queued_v_seq;
 	atomic64_t last_emitted_v_seq;
-	pid_t tgid;
-	uint32_t context_id;
 	/* the job_queue maintains the jobs submitted by clients */
 	struct kfifo job_queue;
 	spinlock_t queue_lock;
@@ -148,7 +143,6 @@ int amd_context_entity_init(struct amd_gpu_scheduler *sched,
 			    struct amd_context_entity *entity,
 			    struct amd_sched_entity *parent,
 			    struct amd_run_queue *rq,
-			    uint32_t context_id,
 			    uint32_t jobs);
 void amd_sched_emit(struct amd_context_entity *c_entity, uint64_t seq);
...
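The v2 note also drops tgid tracking from the scheduler entity. If per-process attribution is ever needed again, the natural place to capture it is the IOCTL handler, which runs in process context, rather than the scheduler. A hypothetical sketch of that pattern (example_ctx_owner and example_record_owner are invented for illustration, not part of the patch):

	/* Hypothetical: record the owner once, at ioctl time, in the
	 * IOCTL-layer context object instead of the scheduler entity. */
	struct example_ctx_owner {
		pid_t tgid;	/* thread group id of the creating process */
	};

	static void example_record_owner(struct example_ctx_owner *owner)
	{
		owner->tgid = current->tgid;	/* valid: ioctls run in process context */
	}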