Commit 7e52a81c authored by Christian König, committed by Alex Deucher

drm/amdgpu: cleanup amdgpu_cs_parser handling

There is no need to allocate that structure dynamically any more; just put it on the
stack. This is a start to cleaning up some of the scheduler fallout.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Parent e4a58a28
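
As a generic illustration of the pattern this commit applies — replacing a kzalloc()/kfree() pair for a short-lived context struct with a stack-allocated, zero-initialized instance — here is a minimal standalone C sketch. It is not the driver code; all names in it are illustrative only.

/*
 * Minimal standalone sketch (not the driver code) of the pattern applied by
 * this commit: a short-lived context struct that used to be kzalloc()'d by a
 * create() helper and kfree()'d in the fini path is instead placed on the
 * caller's stack and zero-initialized. All names here are illustrative.
 */
#include <stdio.h>

struct example_parser {
	const char *name;
	unsigned int num_ibs;
};

/* Setup reduces to plain field assignments once the caller owns the storage. */
static int example_parser_init(struct example_parser *p, const char *name)
{
	p->name = name;
	return 0;
}

int main(void)
{
	/* Zero-initialized on the stack, mirroring "struct amdgpu_cs_parser parser = {};" (GNU C idiom) */
	struct example_parser parser = {};

	if (example_parser_init(&parser, "cs"))
		return 1;

	printf("parser %s, num_ibs=%u\n", parser.name, parser.num_ibs);
	return 0;	/* no kfree(): the storage disappears with the stack frame */
}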
drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -2256,11 +2256,6 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev);
 bool amdgpu_card_posted(struct amdgpu_device *adev);
 void amdgpu_update_display_priority(struct amdgpu_device *adev);
 bool amdgpu_boot_test_post_card(struct amdgpu_device *adev);
-struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
-						 struct drm_file *filp,
-						 struct amdgpu_ctx *ctx,
-						 struct amdgpu_ib *ibs,
-						 uint32_t num_ibs);
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
 int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
...
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -127,30 +127,6 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 	return 0;
 }
 
-struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
-						 struct drm_file *filp,
-						 struct amdgpu_ctx *ctx,
-						 struct amdgpu_ib *ibs,
-						 uint32_t num_ibs)
-{
-	struct amdgpu_cs_parser *parser;
-	int i;
-
-	parser = kzalloc(sizeof(struct amdgpu_cs_parser), GFP_KERNEL);
-	if (!parser)
-		return NULL;
-
-	parser->adev = adev;
-	parser->filp = filp;
-	parser->ctx = ctx;
-	parser->ibs = ibs;
-	parser->num_ibs = num_ibs;
-	for (i = 0; i < num_ibs; i++)
-		ibs[i].ctx = ctx;
-
-	return parser;
-}
-
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 {
 	union drm_amdgpu_cs *cs = data;
@@ -490,6 +466,7 @@ static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int err
 static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
 {
 	unsigned i;
+
 	if (parser->ctx)
 		amdgpu_ctx_put(parser->ctx);
 	if (parser->bo_list)
@@ -505,7 +482,6 @@ static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
 	kfree(parser->ibs);
 	if (parser->uf.bo)
 		drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
-	kfree(parser);
 }
 
 /**
@@ -824,36 +800,36 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	union drm_amdgpu_cs *cs = data;
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
-	struct amdgpu_cs_parser *parser;
+	struct amdgpu_cs_parser parser = {};
 	bool reserved_buffers = false;
 	int i, r;
 
 	if (!adev->accel_working)
 		return -EBUSY;
 
-	parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0);
-	if (!parser)
-		return -ENOMEM;
-	r = amdgpu_cs_parser_init(parser, data);
+	parser.adev = adev;
+	parser.filp = filp;
+
+	r = amdgpu_cs_parser_init(&parser, data);
 	if (r) {
 		DRM_ERROR("Failed to initialize parser !\n");
-		amdgpu_cs_parser_fini(parser, r, false);
+		amdgpu_cs_parser_fini(&parser, r, false);
 		r = amdgpu_cs_handle_lockup(adev, r);
 		return r;
 	}
 	mutex_lock(&vm->mutex);
-	r = amdgpu_cs_parser_relocs(parser);
+	r = amdgpu_cs_parser_relocs(&parser);
 	if (r == -ENOMEM)
 		DRM_ERROR("Not enough memory for command submission!\n");
 	else if (r && r != -ERESTARTSYS)
 		DRM_ERROR("Failed to process the buffer list %d!\n", r);
 	else if (!r) {
 		reserved_buffers = true;
-		r = amdgpu_cs_ib_fill(adev, parser);
+		r = amdgpu_cs_ib_fill(adev, &parser);
 	}
 
 	if (!r) {
-		r = amdgpu_cs_dependencies(adev, parser);
+		r = amdgpu_cs_dependencies(adev, &parser);
 		if (r)
 			DRM_ERROR("Failed in the dependencies handling %d!\n", r);
 	}
@@ -861,36 +837,38 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	if (r)
 		goto out;
 
-	for (i = 0; i < parser->num_ibs; i++)
-		trace_amdgpu_cs(parser, i);
+	for (i = 0; i < parser.num_ibs; i++)
+		trace_amdgpu_cs(&parser, i);
 
-	r = amdgpu_cs_ib_vm_chunk(adev, parser);
+	r = amdgpu_cs_ib_vm_chunk(adev, &parser);
 	if (r)
 		goto out;
 
-	if (amdgpu_enable_scheduler && parser->num_ibs) {
+	if (amdgpu_enable_scheduler && parser.num_ibs) {
 		struct amdgpu_job *job;
-		struct amdgpu_ring * ring = parser->ibs->ring;
+		struct amdgpu_ring * ring = parser.ibs->ring;
+
 		job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
 		if (!job) {
 			r = -ENOMEM;
 			goto out;
 		}
+
 		job->base.sched = &ring->sched;
-		job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
-		job->adev = parser->adev;
-		job->ibs = parser->ibs;
-		job->num_ibs = parser->num_ibs;
-		job->base.owner = parser->filp;
+		job->base.s_entity = &parser.ctx->rings[ring->idx].entity;
+		job->adev = parser.adev;
+		job->ibs = parser.ibs;
+		job->num_ibs = parser.num_ibs;
+		job->base.owner = parser.filp;
 		mutex_init(&job->job_lock);
 		if (job->ibs[job->num_ibs - 1].user) {
-			job->uf = parser->uf;
+			job->uf = parser.uf;
 			job->ibs[job->num_ibs - 1].user = &job->uf;
-			parser->uf.bo = NULL;
+			parser.uf.bo = NULL;
 		}
 
-		parser->ibs = NULL;
-		parser->num_ibs = 0;
+		parser.ibs = NULL;
+		parser.num_ibs = 0;
 
 		job->free_job = amdgpu_cs_free_job;
 		mutex_lock(&job->job_lock);
@@ -902,24 +880,24 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 			goto out;
 		}
 		cs->out.handle =
-			amdgpu_ctx_add_fence(parser->ctx, ring,
+			amdgpu_ctx_add_fence(parser.ctx, ring,
 					     &job->base.s_fence->base);
 		job->ibs[job->num_ibs - 1].sequence = cs->out.handle;
 
-		list_sort(NULL, &parser->validated, cmp_size_smaller_first);
-		ttm_eu_fence_buffer_objects(&parser->ticket,
-					    &parser->validated,
+		list_sort(NULL, &parser.validated, cmp_size_smaller_first);
+		ttm_eu_fence_buffer_objects(&parser.ticket,
+					    &parser.validated,
 					    &job->base.s_fence->base);
 
 		mutex_unlock(&job->job_lock);
-		amdgpu_cs_parser_fini_late(parser);
+		amdgpu_cs_parser_fini_late(&parser);
 		mutex_unlock(&vm->mutex);
 		return 0;
 	}
 
-	cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
+	cs->out.handle = parser.ibs[parser.num_ibs - 1].sequence;
 out:
-	amdgpu_cs_parser_fini(parser, r, reserved_buffers);
+	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
 	mutex_unlock(&vm->mutex);
 	r = amdgpu_cs_handle_lockup(adev, r);
 	return r;
...