/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>

#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_reset.h"

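/*
 * Timeout handler called by the DRM scheduler when a job has been running
 * for too long: bail out early if the device was unplugged, try a
 * lightweight per-ring soft recovery first, and otherwise either escalate
 * to a full GPU reset or, when recovery is not warranted, suspend the
 * scheduler timeout (flagging TDR debugging on SR-IOV VFs).
 */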
static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
	struct amdgpu_job *job = to_amdgpu_job(s_job);
	struct amdgpu_task_info ti;
	struct amdgpu_device *adev = ring->adev;
	int idx;
	int r;

	if (!drm_dev_enter(adev_to_drm(adev), &idx)) {
		DRM_INFO("%s - device unplugged skipping recovery on scheduler:%s",
			 __func__, s_job->sched->name);

		/* Effectively the job is aborted as the device is gone */
		return DRM_GPU_SCHED_STAT_ENODEV;
	}

	memset(&ti, 0, sizeof(struct amdgpu_task_info));

	if (amdgpu_gpu_recovery &&
	    amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
		DRM_ERROR("ring %s timeout, but soft recovered\n",
			  s_job->sched->name);
		goto exit;
	}

	amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
	DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
		  job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
		  ring->fence_drv.sync_seq);
	DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n",
		  ti.process_name, ti.tgid, ti.task_name, ti.pid);

	if (amdgpu_device_should_recover_gpu(ring->adev)) {
		struct amdgpu_reset_context reset_context;
		memset(&reset_context, 0, sizeof(reset_context));

		reset_context.method = AMD_RESET_METHOD_NONE;
		reset_context.reset_req_dev = adev;
		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

		r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
		if (r)
			DRM_ERROR("GPU Recovery Failed: %d\n", r);
	} else {
		drm_sched_suspend_timeout(&ring->sched);
		if (amdgpu_sriov_vf(adev))
			adev->virt.tdr_debug = true;
	}

exit:
	drm_dev_exit(idx);
	return DRM_GPU_SCHED_STAT_NOMINAL;
}

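/*
 * Allocate a job with space for @num_ibs IBs. The scheduler pointer is
 * seeded with ring 0 purely so that adev is always reachable through the
 * job; the sync objects and the VRAM-lost counter snapshot are set up
 * for later submission.
 */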
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
		     struct amdgpu_job **job, struct amdgpu_vm *vm)
{
	if (num_ibs == 0)
		return -EINVAL;

	*job = kzalloc(struct_size(*job, ibs, num_ibs), GFP_KERNEL);
	if (!*job)
		return -ENOMEM;

	/*
	 * Initialize the scheduler to at least some ring so that we always
	 * have a pointer to adev.
	 */
	(*job)->base.sched = &adev->rings[0]->sched;
	(*job)->vm = vm;
	(*job)->num_ibs = num_ibs;

	amdgpu_sync_create(&(*job)->sync);
	amdgpu_sync_create(&(*job)->sched_sync);
	(*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	(*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;

	return 0;
}

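/*
 * Convenience wrapper: allocate a VM-less job carrying a single IB of
 * @size bytes drawn from the given IB pool.
 */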
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
		enum amdgpu_ib_pool_type pool_type,
		struct amdgpu_job **job)
{
	int r;

	r = amdgpu_job_alloc(adev, 1, job, NULL);
	if (r)
		return r;

	r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
	if (r)
		kfree(*job);

	return r;
}

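/*
 * Free the IBs attached to a job. The scheduler's finished fence (or the
 * hardware fence when no scheduler fence exists) is passed along so the
 * IB pool knows when the memory can safely be reused.
 */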
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
	struct dma_fence *f;
	struct dma_fence *hw_fence;
	unsigned i;

	if (job->hw_fence.ops == NULL)
		hw_fence = job->external_hw_fence;
	else
		hw_fence = &job->hw_fence;

	/* use sched fence if available */
	f = job->base.s_fence ? &job->base.s_fence->finished : hw_fence;
	for (i = 0; i < job->num_ibs; ++i)
		amdgpu_ib_free(ring->adev, &job->ibs[i], f);
}

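/*
 * Scheduler callback invoked once the scheduler is done with the job:
 * drop the sync objects and release the job memory, either through the
 * embedded hw_fence refcount or with a plain kfree().
 */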
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
	struct amdgpu_job *job = to_amdgpu_job(s_job);

	drm_sched_job_cleanup(s_job);

	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->sched_sync);

	/* only put the hw fence if the job has an embedded fence */
	if (job->hw_fence.ops != NULL)
		dma_fence_put(&job->hw_fence);
	else
		kfree(job);
}

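/*
 * Free a job that is not owned by the scheduler, e.g. on the direct
 * submission path below.
 */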
void amdgpu_job_free(struct amdgpu_job *job)
{
	amdgpu_job_free_resources(job);
	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->sched_sync);

	/* only put the hw fence if the job has an embedded fence */
	if (job->hw_fence.ops != NULL)
		dma_fence_put(&job->hw_fence);
	else
		kfree(job);
}

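/*
 * Hand a job over to the DRM scheduler: initialize and arm the scheduler
 * job, return the finished fence through @f, and push the job onto the
 * entity's queue. Illustrative call sequence (the IB size and pool choice
 * here are just examples):
 *
 *	r = amdgpu_job_alloc_with_ib(adev, 64, AMDGPU_IB_POOL_DELAYED, &job);
 *	// ... fill job->ibs[0] with packets ...
 *	r = amdgpu_job_submit(job, &entity, owner, &fence);
 */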
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
		      void *owner, struct dma_fence **f)
{
	int r;

	if (!f)
		return -EINVAL;

	r = drm_sched_job_init(&job->base, entity, owner);
	if (r)
		return r;

	drm_sched_job_arm(&job->base);

	*f = dma_fence_get(&job->base.s_fence->finished);
	amdgpu_job_free_resources(job);
	drm_sched_entity_push_job(&job->base);

	return 0;
}

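/*
 * Submit a job directly to a ring, bypassing the scheduler. The resulting
 * hardware fence is recorded as external_hw_fence and returned through
 * @fence, and the job itself is freed right away.
 */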
int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
			     struct dma_fence **fence)
{
	int r;

	job->base.sched = &ring->sched;
	r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
	/* record external_hw_fence for direct submit */
	job->external_hw_fence = dma_fence_get(*fence);
	if (r)
		return r;

	amdgpu_job_free(job);
	dma_fence_put(*fence);

	return 0;
}

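/*
 * Scheduler callback: return the next fence the job still depends on,
 * and grab a VM ID (which may add further dependencies) once everything
 * else has signaled.
 */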
static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
					       struct drm_sched_entity *s_entity)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	struct amdgpu_vm *vm = job->vm;
	struct dma_fence *fence;
	int r;

	fence = amdgpu_sync_get_fence(&job->sync);
	if (fence && drm_sched_dependency_optimized(fence, s_entity)) {
		r = amdgpu_sync_fence(&job->sched_sync, fence);
		if (r)
			DRM_ERROR("Error adding fence (%d)\n", r);
	}

	while (fence == NULL && vm && !job->vmid) {
		r = amdgpu_vmid_grab(vm, ring, &job->sync,
				     &job->base.s_fence->finished,
				     job);
		if (r)
			DRM_ERROR("Error getting VM ID (%d)\n", r);

		fence = amdgpu_sync_get_fence(&job->sync);
	}

	return fence;
}

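/*
 * Scheduler callback: submit the job's IBs to the hardware ring. The IBs
 * are skipped with -ECANCELED if VRAM contents were lost after the job
 * was created.
 */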
static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
	struct dma_fence *fence = NULL, *finished;
	struct amdgpu_job *job;
	int r = 0;

	job = to_amdgpu_job(sched_job);
	finished = &job->base.s_fence->finished;

	BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));

	trace_amdgpu_sched_run_job(job);

	if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter))
		dma_fence_set_error(finished, -ECANCELED); /* skip IBs as well if VRAM is lost */

	if (finished->error < 0) {
		DRM_INFO("Skip scheduling IBs!\n");
	} else {
		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
				       &fence);
		if (r)
			DRM_ERROR("Error scheduling IBs (%d)\n", r);
	}

	job->job_run_counter++;
	amdgpu_job_free_resources(job);

	fence = r ? ERR_PTR(r) : fence;
	return fence;
}

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

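/*
 * Abort everything queued on a scheduler: pop every not-yet-scheduled job
 * from each run queue and signal its fences with -EHWPOISON, then mark
 * the jobs already handed to the hardware the same way.
 */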
void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job;
	struct drm_sched_entity *s_entity = NULL;
	int i;

	/* Signal all jobs not yet scheduled */
	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		struct drm_sched_rq *rq = &sched->sched_rq[i];

		if (!rq)
			continue;

		spin_lock(&rq->lock);
		list_for_each_entry(s_entity, &rq->entities, list) {
			while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
				struct drm_sched_fence *s_fence = s_job->s_fence;

				dma_fence_signal(&s_fence->scheduled);
				dma_fence_set_error(&s_fence->finished, -EHWPOISON);
				dma_fence_signal(&s_fence->finished);
			}
		}
		spin_unlock(&rq->lock);
	}

	/* Signal all jobs already scheduled to HW */
	list_for_each_entry(s_job, &sched->pending_list, list) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		dma_fence_set_error(&s_fence->finished, -EHWPOISON);
		dma_fence_signal(&s_fence->finished);
	}
}

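/* Callbacks through which the common DRM GPU scheduler drives amdgpu jobs. */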
const struct drm_sched_backend_ops amdgpu_sched_ops = {
	.dependency = amdgpu_job_dependency,
	.run_job = amdgpu_job_run,
	.timedout_job = amdgpu_job_timedout,
	.free_job = amdgpu_job_free_cb
};