/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

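/*
 * Scheduler timeout callback: try to soft recover the ring first; if that
 * fails, log the signaled and emitted fence sequence numbers and trigger a
 * full GPU recovery when the device is allowed to recover.
 */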
static void amdgpu_job_timedout(struct drm_sched_job *s_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
	struct amdgpu_job *job = to_amdgpu_job(s_job);

	if (amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
		DRM_ERROR("ring %s timeout, but soft recovered\n",
			  s_job->sched->name);
		return;
	}

	DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
		  job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
		  ring->fence_drv.sync_seq);

	if (amdgpu_device_should_recover_gpu(ring->adev))
		amdgpu_device_gpu_recover(ring->adev, job);
}

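/*
 * Allocate a job with storage for @num_ibs IBs right behind it, create the
 * sync containers and snapshot the current VRAM lost counter.
 */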
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
		     struct amdgpu_job **job, struct amdgpu_vm *vm)
{
	size_t size = sizeof(struct amdgpu_job);

	if (num_ibs == 0)
		return -EINVAL;

	size += sizeof(struct amdgpu_ib) * num_ibs;

	*job = kzalloc(size, GFP_KERNEL);
	if (!*job)
		return -ENOMEM;

	/*
	 * Initialize the scheduler to at least some ring so that we always
	 * have a pointer to adev.
	 */
	(*job)->base.sched = &adev->rings[0]->sched;
	(*job)->vm = vm;
	(*job)->ibs = (void *)&(*job)[1];
	(*job)->num_ibs = num_ibs;

	amdgpu_sync_create(&(*job)->sync);
	amdgpu_sync_create(&(*job)->sched_sync);
	(*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);

	return 0;
}

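/*
 * Convenience wrapper: allocate a job with a single IB of @size bytes and
 * no VM attached.
 */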
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
			     struct amdgpu_job **job)
{
	int r;

	r = amdgpu_job_alloc(adev, 1, job, NULL);
	if (r)
		return r;

	r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
	if (r)
		kfree(*job);

	return r;
}

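/*
 * Free the job's IBs, fencing them against the scheduler's finished fence
 * if available, otherwise against the hardware fence.
 */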
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
	struct dma_fence *f;
	unsigned i;

	/* use sched fence if available */
	f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;

	for (i = 0; i < job->num_ibs; ++i)
		amdgpu_ib_free(ring->adev, &job->ibs[i], f);
}

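/*
 * Scheduler free_job callback: drop the ring priority reference, the
 * hardware fence and the sync containers, then free the job itself.
 */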
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
	struct amdgpu_job *job = to_amdgpu_job(s_job);

	amdgpu_ring_priority_put(ring, s_job->s_priority);
	dma_fence_put(job->fence);
	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->sched_sync);
	kfree(job);
}

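/*
 * Free a job which was never handed over to the scheduler, including its
 * IBs, hardware fence and sync containers.
 */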
void amdgpu_job_free(struct amdgpu_job *job)
{
	amdgpu_job_free_resources(job);

	dma_fence_put(job->fence);
	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->sched_sync);
	kfree(job);
}

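/*
 * Hand the job over to the scheduler: initialize the scheduler job, return
 * a reference to the finished fence in @f, push the job to @entity and
 * raise the ring priority accordingly.
 */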
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
		      void *owner, struct dma_fence **f)
{
	enum drm_sched_priority priority;
	struct amdgpu_ring *ring;
	int r;

	if (!f)
		return -EINVAL;

	r = drm_sched_job_init(&job->base, entity, owner);
	if (r)
		return r;

	job->owner = owner;
	*f = dma_fence_get(&job->base.s_fence->finished);
	amdgpu_job_free_resources(job);
	priority = job->base.s_priority;
	drm_sched_entity_push_job(&job->base, entity);

	ring = to_amdgpu_ring(entity->rq->sched);
	amdgpu_ring_priority_get(ring, priority);

	return 0;
}

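/*
 * Submit the job directly to the ring, bypassing the scheduler, and free
 * it afterwards. The resulting hardware fence is returned in @fence.
 */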
int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
			     struct dma_fence **fence)
{
	int r;

	job->base.sched = &ring->sched;
	r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
	if (r)
		return r;

	/* only take a reference on the HW fence once the IBs were scheduled */
	job->fence = dma_fence_get(*fence);

	amdgpu_job_free(job);
	return 0;
}

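/*
 * Scheduler dependency callback: return the next fence the job needs to
 * wait for. Explicit fences the scheduler could optimize away are kept in
 * sched_sync, and a VMID is grabbed once all other dependencies are
 * resolved, which may add more fences to wait for.
 */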
static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
					       struct drm_sched_entity *s_entity)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	struct amdgpu_vm *vm = job->vm;
	struct dma_fence *fence;
	bool explicit = false;
	int r;

	fence = amdgpu_sync_get_fence(&job->sync, &explicit);
	if (fence && explicit) {
		if (drm_sched_dependency_optimized(fence, s_entity)) {
			r = amdgpu_sync_fence(ring->adev, &job->sched_sync,
					      fence, false);
			if (r)
				DRM_ERROR("Error adding fence (%d)\n", r);
		}
	}

	while (fence == NULL && vm && !job->vmid) {
		r = amdgpu_vmid_grab(vm, ring, &job->sync,
				     &job->base.s_fence->finished,
				     job);
		if (r)
			DRM_ERROR("Error getting VM ID (%d)\n", r);

		fence = amdgpu_sync_get_fence(&job->sync, NULL);
	}

	return fence;
}

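/*
 * Scheduler run_job callback: submit the job's IBs to the ring and return
 * the resulting hardware fence. Scheduling is skipped if VRAM was lost
 * since the job was created or an error is already set on the finished
 * fence.
 */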
static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
	struct dma_fence *fence = NULL, *finished;
	struct amdgpu_job *job;
	int r;

	job = to_amdgpu_job(sched_job);
	finished = &job->base.s_fence->finished;

	BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));

	trace_amdgpu_sched_run_job(job);

	if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter))
		dma_fence_set_error(finished, -ECANCELED); /* skip IB as well if VRAM lost */

	if (finished->error < 0) {
		DRM_INFO("Skip scheduling IBs!\n");
	} else {
		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
				       &fence);
		if (r)
			DRM_ERROR("Error scheduling IBs (%d)\n", r);
	}
	/* if the GPU was reset, the HW fence will have been replaced here */
	dma_fence_put(job->fence);
	job->fence = dma_fence_get(fence);

	amdgpu_job_free_resources(job);
	return fence;
}

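/* Callbacks used by the GPU scheduler for amdgpu jobs */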
const struct drm_sched_backend_ops amdgpu_sched_ops = {
	.dependency = amdgpu_job_dependency,
	.run_job = amdgpu_job_run,
	.timedout_job = amdgpu_job_timedout,
	.free_job = amdgpu_job_free_cb
};