/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drmP.h>
#include <drm/drm_auth.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"

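/*
 * Check whether the caller may create a context at the requested scheduler
 * priority: NORMAL and below are open to everyone, higher priorities require
 * CAP_SYS_NICE or DRM master status.
 */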
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      enum drm_sched_priority priority)
{
	/* NORMAL and below are accessible by everyone */
	if (priority <= DRM_SCHED_PRIORITY_NORMAL)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}

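/*
 * Initialize a context: allocate amdgpu_sched_jobs fence slots per ring,
 * create a scheduler entity at the requested priority on every hardware ring
 * except the KIQ, and set up the per-context queue manager.
 */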
static int amdgpu_ctx_init(struct amdgpu_device *adev,
			   enum drm_sched_priority priority,
			   struct drm_file *filp,
			   struct amdgpu_ctx *ctx)
{
	unsigned i, j;
	int r;

	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
		return -EINVAL;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));
	ctx->adev = adev;
	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
			      sizeof(struct dma_fence*), GFP_KERNEL);
	if (!ctx->fences)
		return -ENOMEM;

	mutex_init(&ctx->lock);

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		ctx->rings[i].sequence = 1;
		ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
	}

	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
	ctx->reset_counter_query = ctx->reset_counter;
	ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	ctx->init_priority = priority;
	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;

	/* create context entity for each ring */
	for (i = 0; i < adev->num_rings; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		struct drm_sched_rq *rq;

		rq = &ring->sched.sched_rq[priority];

		if (ring == &adev->gfx.kiq.ring)
			continue;

		r = drm_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
					  rq, amdgpu_sched_jobs, &ctx->guilty);
		if (r)
			goto failed;
	}

	r = amdgpu_queue_mgr_init(adev, &ctx->queue_mgr);
	if (r)
		goto failed;

	return 0;

failed:
	for (j = 0; j < i; j++)
		drm_sched_entity_fini(&adev->rings[j]->sched,
				      &ctx->rings[j].entity);
	kfree(ctx->fences);
	ctx->fences = NULL;
	return r;
}

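/*
 * Tear down a context: drop all fences still held in the per-ring slots,
 * then destroy the scheduler entities and the queue manager.
 */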
static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
{
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		for (j = 0; j < amdgpu_sched_jobs; ++j)
			dma_fence_put(ctx->rings[i].fences[j]);
	kfree(ctx->fences);
	ctx->fences = NULL;

	for (i = 0; i < adev->num_rings; i++)
		drm_sched_entity_fini(&adev->rings[i]->sched,
				      &ctx->rings[i].entity);

	amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);

	mutex_destroy(&ctx->lock);
}

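/*
 * Allocate a context for a file descriptor: reserve a handle in the per-file
 * IDR, initialize the context and return the handle to userspace as the
 * context id.
 */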
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct drm_file *filp,
			    enum drm_sched_priority priority,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}

	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, priority, filp, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}

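/* kref release callback, called when the last reference to a context is dropped */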
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);

	amdgpu_ctx_fini(ctx);

	kfree(ctx);
}

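/* Remove the handle from the IDR and drop the reference taken at allocation */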
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}

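/*
 * Legacy query: report only whether any GPU reset has happened since the
 * last call for this context.
 */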
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter_query == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter_query = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

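/*
 * Extended query: report GPU resets, VRAM loss and whether this context was
 * found guilty of causing a hang.
 */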
static int amdgpu_ctx_query2(struct amdgpu_device *adev,
	struct amdgpu_fpriv *fpriv, uint32_t id,
	union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

	if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

	if (atomic_read(&ctx->guilty))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

	mutex_unlock(&mgr->lock);
	return 0;
}

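/* DRM_AMDGPU_CTX ioctl: dispatch context alloc/free/query requests */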
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;
	enum drm_sched_priority priority;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;
	priority = amdgpu_to_sched_priority(args->in.priority);

	/* For backwards compatibility reasons, we need to accept
	 * ioctls with garbage in the priority field */
	if (priority == DRM_SCHED_PRIORITY_INVALID)
		priority = DRM_SCHED_PRIORITY_NORMAL;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE2:
		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

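/* Look up a context by handle and take a reference; pair with amdgpu_ctx_put() */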
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

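/*
 * Remember the fence of a new submission in the ring's fence slot array and
 * hand its sequence number back through *handler; the slot being reused must
 * already contain a signaled fence.
 */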
int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			      struct dma_fence *fence, uint64_t* handler)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	uint64_t seq = cring->sequence;
	unsigned idx = 0;
	struct dma_fence *other = NULL;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = cring->fences[idx];
	if (other)
		BUG_ON(!dma_fence_is_signaled(other));

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	cring->fences[idx] = fence;
	cring->sequence++;
	spin_unlock(&ctx->ring_lock);

	dma_fence_put(other);
	if (handler)
		*handler = seq;

	return 0;
}

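/*
 * Look up the fence of an earlier submission by sequence number. A seq of
 * ~0ull means the most recent submission; NULL is returned when the fence has
 * already left the slot array and is therefore known to be signaled.
 */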
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct amdgpu_ring *ring, uint64_t seq)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = ctx->rings[ring->idx].sequence - 1;

	if (seq >= cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}


	if (seq + amdgpu_sched_jobs < cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

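/*
 * Apply a priority override by moving each ring's entity to the run queue of
 * the effective priority; the KIQ ring is skipped.
 */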
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  enum drm_sched_priority priority)
{
	int i;
	struct amdgpu_device *adev = ctx->adev;
	struct drm_sched_rq *rq;
	struct drm_sched_entity *entity;
	struct amdgpu_ring *ring;
	enum drm_sched_priority ctx_prio;

	ctx->override_priority = priority;

	ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;

	for (i = 0; i < adev->num_rings; i++) {
		ring = adev->rings[i];
		entity = &ctx->rings[i].entity;
		rq = &ring->sched.sched_rq[ctx_prio];

		if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
			continue;

		drm_sched_entity_set_rq(entity, rq);
	}
}

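/*
 * Wait for the fence occupying the slot that the next submission will reuse,
 * limiting each ring to at most amdgpu_sched_jobs jobs in flight per context.
 */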
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring_id];
	unsigned idx = cring->sequence & (amdgpu_sched_jobs - 1);
	struct dma_fence *other = cring->fences[idx];

	if (other) {
		signed long r;
		r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
		if (r < 0) {
			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
			return r;
		}
	}

	return 0;
}

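/*
 * Per-file context manager setup and teardown; at teardown every remaining
 * context is released and a warning is printed if it is still referenced.
 */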
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}

void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}