/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

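/*
 * Scheduler timeout callback: report how far the ring got (last
 * signaled vs. last emitted fence sequence number) and trigger a GPU
 * reset, using the SR-IOV variant when running as a virtual function.
 */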
static void amdgpu_job_timedout(struct amd_sched_job *s_job)
{
	struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);

	DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
		  job->base.sched->name,
		  atomic_read(&job->ring->fence_drv.last_seq),
		  job->ring->fence_drv.sync_seq);

	if (amdgpu_sriov_vf(job->adev))
		amdgpu_sriov_gpu_reset(job->adev, job);
	else
		amdgpu_gpu_reset(job->adev);
}

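/**
 * amdgpu_job_alloc - allocate a job together with its IB array
 * @adev: amdgpu device
 * @num_ibs: number of IBs to reserve directly behind the job struct
 * @job: resulting job, only valid on success
 * @vm: optional VM the job will run under
 *
 * Initializes the job's sync objects and records the current
 * vram_lost_counter so the job can be skipped later if VRAM contents
 * are lost to a reset before it runs.
 */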
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
		     struct amdgpu_job **job, struct amdgpu_vm *vm)
{
	size_t size = sizeof(struct amdgpu_job);

	if (num_ibs == 0)
		return -EINVAL;

	size += sizeof(struct amdgpu_ib) * num_ibs;

	*job = kzalloc(size, GFP_KERNEL);
	if (!*job)
		return -ENOMEM;

	(*job)->adev = adev;
	(*job)->vm = vm;
	(*job)->ibs = (void *)&(*job)[1];
	(*job)->num_ibs = num_ibs;

	amdgpu_sync_create(&(*job)->sync);
	amdgpu_sync_create(&(*job)->dep_sync);
	amdgpu_sync_create(&(*job)->sched_sync);
	(*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);

	return 0;
}

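/**
 * amdgpu_job_alloc_with_ib - allocate a job with a single IB
 * @adev: amdgpu device
 * @size: size of the IB in bytes
 * @job: resulting job, only valid on success
 *
 * Convenience wrapper for kernel-internal jobs that need exactly one
 * IB and no VM; the page directory address is set to the GART table.
 */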
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
			     struct amdgpu_job **job)
{
	int r;

	r = amdgpu_job_alloc(adev, 1, job, NULL);
	if (r)
		return r;

	r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
	if (r)
		kfree(*job);
	else
		(*job)->vm_pd_addr = adev->gart.table_addr;

	return r;
}

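/**
 * amdgpu_job_free_resources - return the job's IBs to the IB pool
 * @job: job to clean up
 *
 * The IBs are freed against the scheduler's finished fence when one
 * exists (falling back to the hardware fence), so their memory is not
 * reused before the job has actually completed.
 */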
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
	struct dma_fence *f;
	unsigned i;

	/* use sched fence if available */
	f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;

	for (i = 0; i < job->num_ibs; ++i)
		amdgpu_ib_free(job->adev, &job->ibs[i], f);
}

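/*
 * Scheduler free_job callback: drop everything the job still holds,
 * i.e. the ring priority boost, the hardware fence and the sync
 * objects, then free the job itself.
 */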
static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
{
	struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);

	amdgpu_ring_priority_put(job->ring, s_job->s_priority);
	dma_fence_put(job->fence);
	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->dep_sync);
	amdgpu_sync_free(&job->sched_sync);
	kfree(job);
}

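/**
 * amdgpu_job_free - free a job that was never pushed to the scheduler
 * @job: job to clean up
 *
 * Counterpart to amdgpu_job_alloc() for error paths; jobs that were
 * submitted are freed by amdgpu_job_free_cb() once the scheduler is
 * done with them.
 */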
void amdgpu_job_free(struct amdgpu_job *job)
{
	amdgpu_job_free_resources(job);

	dma_fence_put(job->fence);
	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->dep_sync);
	amdgpu_sync_free(&job->sched_sync);
	kfree(job);
}

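/**
 * amdgpu_job_submit - push a job to the GPU scheduler
 * @job: job to submit
 * @ring: ring the job will run on
 * @entity: scheduler entity to queue the job on
 * @owner: opaque cookie identifying the submitter
 * @f: scheduler fence for the job, referenced for the caller
 *
 * After this returns the scheduler owns the job; it is freed through
 * amdgpu_job_free_cb() once it has run.
 */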
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
		      struct amd_sched_entity *entity, void *owner,
		      struct dma_fence **f)
{
	int r;
	job->ring = ring;

	if (!f)
		return -EINVAL;

	r = amd_sched_job_init(&job->base, &ring->sched, entity, owner);
	if (r)
		return r;

	job->owner = owner;
	job->fence_ctx = entity->fence_context;
	*f = dma_fence_get(&job->base.s_fence->finished);
	amdgpu_job_free_resources(job);
	amdgpu_ring_priority_get(job->ring, job->base.s_priority);
	amd_sched_entity_push_job(&job->base, entity);

	return 0;
}

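/*
 * Scheduler dependency callback: return the next fence the job still
 * has to wait for. Explicit dependencies (dep_sync) come first, then
 * the implicit ones (sync); finally a VM ID is grabbed for jobs that
 * run under a VM but don't hold one yet, which can add more fences.
 */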
static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job,
					       struct amd_sched_entity *s_entity)
{
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	struct amdgpu_vm *vm = job->vm;

	struct dma_fence *fence = amdgpu_sync_get_fence(&job->dep_sync);
	int r;

	if (amd_sched_dependency_optimized(fence, s_entity)) {
		r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence);
		if (r)
			DRM_ERROR("Error adding fence to sync (%d)\n", r);
	}
	if (!fence)
		fence = amdgpu_sync_get_fence(&job->sync);
	while (fence == NULL && vm && !job->vm_id) {
		struct amdgpu_ring *ring = job->ring;

		r = amdgpu_vm_grab_id(vm, ring, &job->sync,
				      &job->base.s_fence->finished,
				      job);
		if (r)
			DRM_ERROR("Error getting VM ID (%d)\n", r);

		fence = amdgpu_sync_get_fence(&job->sync);
	}

	return fence;
}

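/*
 * Scheduler run_job callback: hand the job's IBs to the ring. If VRAM
 * contents were lost since the job was created, scheduling is skipped
 * and the finished fence is flagged with -ECANCELED instead.
 */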
static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
{
	struct dma_fence *fence = NULL, *finished;
	struct amdgpu_device *adev;
	struct amdgpu_job *job;
	int r;

	if (!sched_job) {
		DRM_ERROR("job is null\n");
		return NULL;
	}
	job = to_amdgpu_job(sched_job);
	finished = &job->base.s_fence->finished;
	adev = job->adev;

	BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));

	trace_amdgpu_sched_run_job(job);

	/* skip IB as well if VRAM lost */
	if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
		dma_fence_set_error(finished, -ECANCELED);

	if (finished->error < 0) {
		DRM_INFO("Skip scheduling IBs!\n");
	} else {
		r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job,
				       &fence);
		if (r)
			DRM_ERROR("Error scheduling IBs (%d)\n", r);
	}
	/* if gpu reset, hw fence will be replaced here */
	dma_fence_put(job->fence);
	job->fence = dma_fence_get(fence);

	amdgpu_job_free_resources(job);
	return fence;
}

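/* Callbacks the GPU scheduler invokes on behalf of amdgpu jobs */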
const struct amd_sched_backend_ops amdgpu_sched_ops = {
	.dependency = amdgpu_job_dependency,
	.run_job = amdgpu_job_run,
	.timedout_job = amdgpu_job_timedout,
	.free_job = amdgpu_job_free_cb
};