Commit 8ab62eda authored by Jiawei Gu, committed by Christian König

drm/sched: Add device pointer to drm_gpu_scheduler

Add a device pointer so the scheduler's printing can use
DRM_DEV_ERROR() instead, which makes life easier in
multi-GPU scenarios.

v2: amend all calls of drm_sched_init()
v3: fill dev pointer for all drm_sched_init() calls
Signed-off-by: Jiawei Gu <Jiawei.Gu@amd.com>
Reviewed-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220221095705.5290-1-Jiawei.Gu@amd.com
Parent 95ee2a8b
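For reference, the sketch below shows how a driver-side call looks after this change: drm_sched_init() takes a struct device * as its new last argument, which the scheduler stores in sched->dev and uses for DRM_DEV_ERROR(), so error messages identify which GPU they came from. The foo_* names and the numeric parameters are hypothetical placeholders for illustration only; the real call sites are in the hunks that follow.

/*
 * Minimal sketch of the updated drm_sched_init() call for a hypothetical
 * "foo" driver (foo_device, foo_sched_ops and the limits are assumptions,
 * not part of this patch).
 */
static int foo_sched_init(struct foo_device *fdev)
{
	return drm_sched_init(&fdev->sched, &foo_sched_ops,
			      64,			/* hw_submission limit */
			      0,			/* hang_limit */
			      msecs_to_jiffies(500),	/* timeout */
			      NULL,			/* timeout_wq: falls back to system_wq */
			      NULL,			/* score: scheduler uses its own counter */
			      "foo_ring",		/* name printed in scheduler messages */
			      fdev->dev);		/* new: device used by DRM_DEV_ERROR() */
}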
@@ -2316,7 +2316,9 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
 		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
 				   ring->num_hw_submission, amdgpu_job_hang_limit,
-				   timeout, adev->reset_domain->wq, ring->sched_score, ring->name);
+				   timeout, adev->reset_domain->wq,
+				   ring->sched_score, ring->name,
+				   adev->dev);
 		if (r) {
 			DRM_ERROR("Failed to create scheduler on ring %s.\n",
 				  ring->name);
@@ -195,7 +195,7 @@ int etnaviv_sched_init(struct etnaviv_gpu *gpu)
 	ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
			     etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
			     msecs_to_jiffies(500), NULL, NULL,
-			     dev_name(gpu->dev));
+			     dev_name(gpu->dev), gpu->dev);
 	if (ret)
		return ret;
@@ -491,7 +491,7 @@ int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
 	return drm_sched_init(&pipe->base, &lima_sched_ops, 1,
			      lima_job_hang_limit,
			      msecs_to_jiffies(timeout), NULL,
-			      NULL, name);
+			      NULL, name, pipe->ldev->dev);
 }

 void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)
@@ -89,7 +89,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
 	ret = drm_sched_init(&ring->sched, &msm_sched_ops,
			num_hw_submissions, 0, sched_timeout,
-			NULL, NULL, to_msm_bo(ring->bo)->name);
+			NULL, NULL, to_msm_bo(ring->bo)->name, gpu->dev->dev);
 	if (ret) {
		goto fail;
	}
@@ -812,7 +812,7 @@ int panfrost_job_init(struct panfrost_device *pfdev)
			     nentries, 0,
			     msecs_to_jiffies(JOB_TIMEOUT_MS),
			     pfdev->reset.wq,
-			     NULL, "pan_js");
+			     NULL, "pan_js", pfdev->dev);
		if (ret) {
			dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret);
			goto err_sched;
@@ -491,7 +491,7 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
			if (r == -ENOENT)
				drm_sched_job_done(s_job);
			else if (r)
-				DRM_ERROR("fence add callback failed (%d)\n",
+				DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
					  r);
		} else
			drm_sched_job_done(s_job);
@@ -957,7 +957,7 @@ static int drm_sched_main(void *param)
			if (r == -ENOENT)
				drm_sched_job_done(sched_job);
			else if (r)
-				DRM_ERROR("fence add callback failed (%d)\n",
+				DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else {
@@ -991,7 +991,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   unsigned hw_submission, unsigned hang_limit,
		   long timeout, struct workqueue_struct *timeout_wq,
-		   atomic_t *score, const char *name)
+		   atomic_t *score, const char *name, struct device *dev)
 {
	int i, ret;
	sched->ops = ops;
@@ -1001,6 +1001,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
	sched->timeout_wq = timeout_wq ? : system_wq;
	sched->hang_limit = hang_limit;
	sched->score = score ? score : &sched->_score;
+	sched->dev = dev;
	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
		drm_sched_rq_init(sched, &sched->sched_rq[i]);
@@ -1018,7 +1019,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
	if (IS_ERR(sched->thread)) {
		ret = PTR_ERR(sched->thread);
		sched->thread = NULL;
-		DRM_ERROR("Failed to create scheduler for %s.\n", name);
+		DRM_DEV_ERROR(sched->dev, "Failed to create scheduler for %s.\n", name);
		return ret;
	}
@@ -391,7 +391,7 @@ v3d_sched_init(struct v3d_dev *v3d)
			     &v3d_bin_sched_ops,
			     hw_jobs_limit, job_hang_limit,
			     msecs_to_jiffies(hang_limit_ms), NULL,
-			     NULL, "v3d_bin");
+			     NULL, "v3d_bin", v3d->drm.dev);
	if (ret) {
		dev_err(v3d->drm.dev, "Failed to create bin scheduler: %d.", ret);
		return ret;
@@ -401,7 +401,7 @@ v3d_sched_init(struct v3d_dev *v3d)
			     &v3d_render_sched_ops,
			     hw_jobs_limit, job_hang_limit,
			     msecs_to_jiffies(hang_limit_ms), NULL,
-			     NULL, "v3d_render");
+			     NULL, "v3d_render", v3d->drm.dev);
	if (ret) {
		dev_err(v3d->drm.dev, "Failed to create render scheduler: %d.",
			ret);
@@ -413,7 +413,7 @@ v3d_sched_init(struct v3d_dev *v3d)
			     &v3d_tfu_sched_ops,
			     hw_jobs_limit, job_hang_limit,
			     msecs_to_jiffies(hang_limit_ms), NULL,
-			     NULL, "v3d_tfu");
+			     NULL, "v3d_tfu", v3d->drm.dev);
	if (ret) {
		dev_err(v3d->drm.dev, "Failed to create TFU scheduler: %d.",
			ret);
@@ -426,7 +426,7 @@ v3d_sched_init(struct v3d_dev *v3d)
			     &v3d_csd_sched_ops,
			     hw_jobs_limit, job_hang_limit,
			     msecs_to_jiffies(hang_limit_ms), NULL,
-			     NULL, "v3d_csd");
+			     NULL, "v3d_csd", v3d->drm.dev);
	if (ret) {
		dev_err(v3d->drm.dev, "Failed to create CSD scheduler: %d.",
			ret);
@@ -438,7 +438,7 @@ v3d_sched_init(struct v3d_dev *v3d)
			     &v3d_cache_clean_sched_ops,
			     hw_jobs_limit, job_hang_limit,
			     msecs_to_jiffies(hang_limit_ms), NULL,
-			     NULL, "v3d_cache_clean");
+			     NULL, "v3d_cache_clean", v3d->drm.dev);
	if (ret) {
		dev_err(v3d->drm.dev, "Failed to create CACHE_CLEAN scheduler: %d.",
			ret);
@@ -457,13 +457,14 @@ struct drm_gpu_scheduler {
	atomic_t			_score;
	bool				ready;
	bool				free_guilty;
+	struct device			*dev;
 };

 int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   uint32_t hw_submission, unsigned hang_limit,
		   long timeout, struct workqueue_struct *timeout_wq,
-		   atomic_t *score, const char *name);
+		   atomic_t *score, const char *name, struct device *dev);

 void drm_sched_fini(struct drm_gpu_scheduler *sched);
 int drm_sched_job_init(struct drm_sched_job *job,