/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>

#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"

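/*
 * Timeout handler for the DRM GPU scheduler: try a soft recovery of the
 * stuck ring first; if that is not possible, report the offending process
 * and either trigger a full GPU reset or suspend the scheduler timeout.
 */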
static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
	struct amdgpu_job *job = to_amdgpu_job(s_job);
	struct amdgpu_task_info ti;
	struct amdgpu_device *adev = ring->adev;
	int idx;

	if (!drm_dev_enter(&adev->ddev, &idx)) {
		DRM_INFO("%s - device unplugged skipping recovery on scheduler:%s",
			 __func__, s_job->sched->name);

		/* Effectively the job is aborted as the device is gone */
		return DRM_GPU_SCHED_STAT_ENODEV;
	}

	memset(&ti, 0, sizeof(struct amdgpu_task_info));

	if (amdgpu_gpu_recovery &&
	    amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
		DRM_ERROR("ring %s timeout, but soft recovered\n",
			  s_job->sched->name);
		goto exit;
	}

	amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
	DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
		  job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
		  ring->fence_drv.sync_seq);
	DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n",
		  ti.process_name, ti.tgid, ti.task_name, ti.pid);

	if (amdgpu_device_should_recover_gpu(ring->adev)) {
		amdgpu_device_gpu_recover(ring->adev, job);
	} else {
		drm_sched_suspend_timeout(&ring->sched);
		if (amdgpu_sriov_vf(adev))
			adev->virt.tdr_debug = true;
	}

exit:
	drm_dev_exit(idx);
	return DRM_GPU_SCHED_STAT_NOMINAL;
}

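/*
 * Allocate a job with space for @num_ibs IBs in a single allocation,
 * create its sync objects and record the current VRAM lost counter.
 */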
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
		     struct amdgpu_job **job, struct amdgpu_vm *vm)
{
	size_t size = sizeof(struct amdgpu_job);

	if (num_ibs == 0)
		return -EINVAL;

	size += sizeof(struct amdgpu_ib) * num_ibs;

	*job = kzalloc(size, GFP_KERNEL);
	if (!*job)
		return -ENOMEM;

	/*
	 * Initialize the scheduler to at least some ring so that we always
	 * have a pointer to adev.
	 */
	(*job)->base.sched = &adev->rings[0]->sched;
	(*job)->vm = vm;
	(*job)->ibs = (void *)&(*job)[1];
	(*job)->num_ibs = num_ibs;

	amdgpu_sync_create(&(*job)->sync);
	amdgpu_sync_create(&(*job)->sched_sync);
	(*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	(*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;

	return 0;
}

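/* Allocate a job with a single IB of @size bytes from the given IB pool. */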
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
		enum amdgpu_ib_pool_type pool_type,
		struct amdgpu_job **job)
{
	int r;

	r = amdgpu_job_alloc(adev, 1, job, NULL);
	if (r)
		return r;

	r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
	if (r)
		kfree(*job);

	return r;
}

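/*
 * Free the IBs of a job, fenced against the scheduler finished fence if
 * available, otherwise against the hardware fence.
 */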
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
	struct dma_fence *f;
	struct dma_fence *hw_fence;
	unsigned i;

	if (job->hw_fence.ops == NULL)
		hw_fence = job->external_hw_fence;
	else
		hw_fence = &job->hw_fence;

	/* use sched fence if available */
	f = job->base.s_fence ? &job->base.s_fence->finished : hw_fence;
	for (i = 0; i < job->num_ibs; ++i)
		amdgpu_ib_free(ring->adev, &job->ibs[i], f);
}

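/*
 * Scheduler free_job callback: clean up the scheduler job, free the sync
 * objects and drop the job, which is embedded in its hardware fence when
 * one is present.
 */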
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
	struct amdgpu_job *job = to_amdgpu_job(s_job);

	drm_sched_job_cleanup(s_job);

	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->sched_sync);

	/* only put the hw fence if the job has an embedded fence */
	if (job->hw_fence.ops != NULL)
		dma_fence_put(&job->hw_fence);
	else
		kfree(job);
}

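/* Free a job that was never pushed to the scheduler (direct submission). */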
void amdgpu_job_free(struct amdgpu_job *job)
{
	amdgpu_job_free_resources(job);
	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->sched_sync);

	/* only put the hw fence if the job has an embedded fence */
	if (job->hw_fence.ops != NULL)
		dma_fence_put(&job->hw_fence);
	else
		kfree(job);
}

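/* Push a job to the scheduler and return its finished fence in @f. */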
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
		      void *owner, struct dma_fence **f)
{
	int r;

	if (!f)
		return -EINVAL;

	r = drm_sched_job_init(&job->base, entity, owner);
	if (r)
		return r;

	drm_sched_job_arm(&job->base);

	*f = dma_fence_get(&job->base.s_fence->finished);
	amdgpu_job_free_resources(job);
	drm_sched_entity_push_job(&job->base);

	return 0;
}

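/* Submit a job directly to the ring, bypassing the scheduler. */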
int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
			     struct dma_fence **fence)
{
	int r;

	job->base.sched = &ring->sched;
	r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
	/* record external_hw_fence for direct submit */
	job->external_hw_fence = dma_fence_get(*fence);
	if (r)
		return r;

	amdgpu_job_free(job);
	dma_fence_put(*fence);

	return 0;
}

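/*
 * Scheduler dependency callback: return the next fence the job has to wait
 * for, grabbing a VM ID once all other dependencies are satisfied.
 */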
static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
					       struct drm_sched_entity *s_entity)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	struct amdgpu_vm *vm = job->vm;
	struct dma_fence *fence;
	int r;

	fence = amdgpu_sync_get_fence(&job->sync);
	if (fence && drm_sched_dependency_optimized(fence, s_entity)) {
		r = amdgpu_sync_fence(&job->sched_sync, fence);
		if (r)
			DRM_ERROR("Error adding fence (%d)\n", r);
	}

	while (fence == NULL && vm && !job->vmid) {
		r = amdgpu_vmid_grab(vm, ring, &job->sync,
				     &job->base.s_fence->finished,
				     job);
		if (r)
			DRM_ERROR("Error getting VM ID (%d)\n", r);

		fence = amdgpu_sync_get_fence(&job->sync);
	}

	return fence;
}

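/*
 * Scheduler run_job callback: emit the job's IBs to the ring, unless the
 * job was cancelled (for example because VRAM was lost), and return the
 * hardware fence.
 */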
static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
	struct dma_fence *fence = NULL, *finished;
	struct amdgpu_job *job;
	int r = 0;

	job = to_amdgpu_job(sched_job);
	finished = &job->base.s_fence->finished;

	BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));

	trace_amdgpu_sched_run_job(job);

	/* skip IB as well if VRAM lost */
	if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter))
		dma_fence_set_error(finished, -ECANCELED);

	if (finished->error < 0) {
		DRM_INFO("Skip scheduling IBs!\n");
	} else {
		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
				       &fence);
		if (r)
			DRM_ERROR("Error scheduling IBs (%d)\n", r);
	}

	if (!job->job_run_counter)
		dma_fence_get(fence);
	else if (finished->error < 0)
		dma_fence_put(&job->hw_fence);
	job->job_run_counter++;
	amdgpu_job_free_resources(job);

	fence = r ? ERR_PTR(r) : fence;
	return fence;
}

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

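/*
 * Signal and cancel (-EHWPOISON) all jobs of a scheduler, both those still
 * queued in its entities and those already pushed to the hardware.
 */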
void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job;
	struct drm_sched_entity *s_entity = NULL;
	int i;

	/* Signal all jobs not yet scheduled */
	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		struct drm_sched_rq *rq = &sched->sched_rq[i];

		if (!rq)
			continue;

		spin_lock(&rq->lock);
		list_for_each_entry(s_entity, &rq->entities, list) {
			while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
				struct drm_sched_fence *s_fence = s_job->s_fence;

				dma_fence_signal(&s_fence->scheduled);
				dma_fence_set_error(&s_fence->finished, -EHWPOISON);
				dma_fence_signal(&s_fence->finished);
			}
		}
		spin_unlock(&rq->lock);
	}

	/* Signal all jobs already scheduled to HW */
	list_for_each_entry(s_job, &sched->pending_list, list) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		dma_fence_set_error(&s_fence->finished, -EHWPOISON);
		dma_fence_signal(&s_fence->finished);
	}
}

const struct drm_sched_backend_ops amdgpu_sched_ops = {
	.dependency = amdgpu_job_dependency,
	.run_job = amdgpu_job_run,
	.timedout_job = amdgpu_job_timedout,
	.free_job = amdgpu_job_free_cb
};