/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drm_auth.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"
#include "amdgpu_ras.h"

#define to_amdgpu_ctx_entity(e)	\
	container_of((e), struct amdgpu_ctx_entity, entity)

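/* Number of scheduler entities a context exposes for each hardware IP type. */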
const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
	[AMDGPU_HW_IP_GFX]	=	1,
	[AMDGPU_HW_IP_COMPUTE]	=	4,
	[AMDGPU_HW_IP_DMA]	=	2,
	[AMDGPU_HW_IP_UVD]	=	1,
	[AMDGPU_HW_IP_VCE]	=	1,
	[AMDGPU_HW_IP_UVD_ENC]	=	1,
	[AMDGPU_HW_IP_VCN_DEC]	=	1,
	[AMDGPU_HW_IP_VCN_ENC]	=	1,
	[AMDGPU_HW_IP_VCN_JPEG]	=	1,
};

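/* Total number of entities per context: the sum of amdgpu_ctx_num_entities[]. */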
static int amdgpu_ctx_total_num_entities(void)
{
	unsigned i, num_entities = 0;

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
		num_entities += amdgpu_ctx_num_entities[i];

	return num_entities;
}

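/*
 * Only CAP_SYS_NICE holders or the current DRM master may create contexts
 * with a priority above NORMAL.
 */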
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      enum drm_sched_priority priority)
{
	/* NORMAL and below are accessible by everyone */
	if (priority <= DRM_SCHED_PRIORITY_NORMAL)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}

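/*
 * Allocate the per-context fence ring and scheduler entities, then bind each
 * entity to the scheduler list of its hardware IP block.
 */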
static int amdgpu_ctx_init(struct amdgpu_device *adev,
			   enum drm_sched_priority priority,
			   struct drm_file *filp,
			   struct amdgpu_ctx *ctx)
{
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	unsigned i, j;
	int r;

	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
		return -EINVAL;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));
	ctx->adev = adev;

	ctx->fences = kcalloc(amdgpu_sched_jobs * num_entities,
			      sizeof(struct dma_fence*), GFP_KERNEL);
	if (!ctx->fences)
		return -ENOMEM;

	ctx->entities[0] = kcalloc(num_entities,
				   sizeof(struct amdgpu_ctx_entity),
				   GFP_KERNEL);
	if (!ctx->entities[0]) {
		r = -ENOMEM;
		goto error_free_fences;
	}

	for (i = 0; i < num_entities; ++i) {
		struct amdgpu_ctx_entity *entity = &ctx->entities[0][i];

		entity->sequence = 1;
		entity->fences = &ctx->fences[amdgpu_sched_jobs * i];
	}
	for (i = 1; i < AMDGPU_HW_IP_NUM; ++i)
		ctx->entities[i] = ctx->entities[i - 1] +
			amdgpu_ctx_num_entities[i - 1];

	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	mutex_init(&ctx->lock);

	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
	ctx->reset_counter_query = ctx->reset_counter;
	ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	ctx->init_priority = priority;
	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		struct drm_gpu_scheduler **scheds;
		struct drm_gpu_scheduler *sched;
		unsigned num_scheds = 0;

		switch (i) {
		case AMDGPU_HW_IP_GFX:
			scheds = adev->gfx.gfx_sched;
			num_scheds = 1;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			scheds = adev->gfx.compute_sched;
			num_scheds = adev->gfx.num_compute_sched;
			break;
		case AMDGPU_HW_IP_DMA:
			scheds = adev->sdma.sdma_sched;
			num_scheds = adev->sdma.num_sdma_sched;
			break;
		case AMDGPU_HW_IP_UVD:
			sched = &adev->uvd.inst[0].ring.sched;
			scheds = &sched;
			num_scheds = 1;
			break;
		case AMDGPU_HW_IP_VCE:
			sched = &adev->vce.ring[0].sched;
			scheds = &sched;
			num_scheds = 1;
			break;
		case AMDGPU_HW_IP_UVD_ENC:
			sched = &adev->uvd.inst[0].ring_enc[0].sched;
			scheds = &sched;
			num_scheds = 1;
			break;
		case AMDGPU_HW_IP_VCN_DEC:
			scheds = adev->vcn.vcn_dec_sched;
			num_scheds =  adev->vcn.num_vcn_dec_sched;
			break;
		case AMDGPU_HW_IP_VCN_ENC:
			scheds = adev->vcn.vcn_enc_sched;
			num_scheds =  adev->vcn.num_vcn_enc_sched;
			break;
		case AMDGPU_HW_IP_VCN_JPEG:
			scheds = adev->jpeg.jpeg_sched;
			num_scheds =  adev->jpeg.num_jpeg_sched;
			break;
		}

		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j)
			r = drm_sched_entity_init(&ctx->entities[i][j].entity,
						  priority, scheds,
						  num_scheds, &ctx->guilty);
		if (r)
			goto error_cleanup_entities;
	}

	return 0;

error_cleanup_entities:
	for (i = 0; i < num_entities; ++i)
		drm_sched_entity_destroy(&ctx->entities[0][i].entity);
	kfree(ctx->entities[0]);

error_free_fences:
	kfree(ctx->fences);
	ctx->fences = NULL;
	return r;
}

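/* Final kref callback: drop all stored fences and free the context. */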
static void amdgpu_ctx_fini(struct kref *ref)
{
	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < num_entities; ++i)
		for (j = 0; j < amdgpu_sched_jobs; ++j)
			dma_fence_put(ctx->entities[0][i].fences[j]);
	kfree(ctx->fences);
	kfree(ctx->entities[0]);

	mutex_destroy(&ctx->lock);

	kfree(ctx);
}

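/*
 * Map the (hw_ip, instance, ring) triple supplied by userspace onto the
 * corresponding scheduler entity of this context.
 */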
int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
			  u32 ring, struct drm_sched_entity **entity)
{
	if (hw_ip >= AMDGPU_HW_IP_NUM) {
		DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
		return -EINVAL;
	}

	/* Right now all IPs have only one instance - multiple rings. */
	if (instance != 0) {
		DRM_DEBUG("invalid ip instance: %d\n", instance);
		return -EINVAL;
	}

	if (ring >= amdgpu_ctx_num_entities[hw_ip]) {
		DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring);
		return -EINVAL;
	}

	*entity = &ctx->entities[hw_ip][ring].entity;
	return 0;
}

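/* Create a context, publish it in the file's handle IDR and return its id. */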
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct drm_file *filp,
			    enum drm_sched_priority priority,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}

	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, priority, filp, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}

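/* kref callback: destroy all scheduler entities, then tear the context down. */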
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;
	unsigned num_entities;
	u32 i;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);

	num_entities = amdgpu_ctx_total_num_entities();
	for (i = 0; i < num_entities; i++)
		drm_sched_entity_destroy(&ctx->entities[0][i].entity);

	amdgpu_ctx_fini(ref);
}

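/* Remove the handle from the IDR and drop the context reference it held. */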
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}

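/*
 * Legacy state query: reports whether a GPU reset happened since the last
 * query by comparing against the context's snapshot of the reset counter.
 */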
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter_query == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter_query = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

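/*
 * Extended state query: reports reset, VRAM-lost, guilty and RAS error
 * status as flag bits in a single call.
 */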
static int amdgpu_ctx_query2(struct amdgpu_device *adev,
	struct amdgpu_fpriv *fpriv, uint32_t id,
	union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned long ras_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

	if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

	if (atomic_read(&ctx->guilty))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

	/* query UE count */
	ras_counter = amdgpu_ras_query_error_count(adev, false);
	/* the RAS counter increases monotonically */
	if (ras_counter != ctx->ras_counter_ue) {
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
		ctx->ras_counter_ue = ras_counter;
	}

	/* query CE count */
	ras_counter = amdgpu_ras_query_error_count(adev, true);
	if (ras_counter != ctx->ras_counter_ce) {
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
		ctx->ras_counter_ce = ras_counter;
	}

	mutex_unlock(&mgr->lock);
	return 0;
}

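/*
 * DRM_AMDGPU_CTX ioctl entry point, dispatching context alloc/free/query
 * operations. Userspace typically reaches this through libdrm_amdgpu helpers
 * such as amdgpu_cs_ctx_create()/amdgpu_cs_ctx_free() rather than raw ioctls.
 */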
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;
	enum drm_sched_priority priority;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;
	priority = amdgpu_to_sched_priority(args->in.priority);

	/* For backwards compatibility reasons, we need to accept
	 * ioctls with garbage in the priority field */
	if (priority == DRM_SCHED_PRIORITY_INVALID)
		priority = DRM_SCHED_PRIORITY_NORMAL;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE2:
		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

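/* Look up a context by handle and take a reference; pair with amdgpu_ctx_put(). */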
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

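/*
 * Remember a submitted fence in the entity's ring buffer and return its
 * sequence number as the handle. The slot being recycled must already have
 * signaled.
 */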
void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
			  struct drm_sched_entity *entity,
			  struct dma_fence *fence, uint64_t* handle)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	uint64_t seq = centity->sequence;
	struct dma_fence *other = NULL;
	unsigned idx = 0;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = centity->fences[idx];
	if (other)
		BUG_ON(!dma_fence_is_signaled(other));

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	centity->fences[idx] = fence;
	centity->sequence++;
	spin_unlock(&ctx->ring_lock);

	dma_fence_put(other);
	if (handle)
		*handle = seq;
}

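/*
 * Look up a previously added fence by sequence number; ~0ull means the most
 * recent one. Returns NULL when the fence is old enough to have left the
 * ring buffer.
 */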
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct drm_sched_entity *entity,
				       uint64_t seq)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = centity->sequence - 1;

	if (seq >= centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

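/* Propagate a priority override to every scheduler entity of the context. */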
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  enum drm_sched_priority priority)
{
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	enum drm_sched_priority ctx_prio;
	unsigned i;

	ctx->override_priority = priority;

	ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;

	for (i = 0; i < num_entities; i++) {
		struct drm_sched_entity *entity = &ctx->entities[0][i].entity;

		drm_sched_entity_set_priority(entity, ctx_prio);
	}
}

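/* Wait for the fence occupying the ring slot that the next submission reuses. */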
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
			       struct drm_sched_entity *entity)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *other;
	unsigned idx;
	long r;

	spin_lock(&ctx->ring_lock);
	idx = centity->sequence & (amdgpu_sched_jobs - 1);
	other = dma_fence_get(centity->fences[idx]);
	spin_unlock(&ctx->ring_lock);

	if (!other)
		return 0;

	r = dma_fence_wait(other, true);
	if (r < 0 && r != -ERESTARTSYS)
		DRM_ERROR("Error (%ld) waiting for fence!\n", r);

	dma_fence_put(other);
	return r;
}

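/* Initialize the per-file context manager: the handle IDR and its lock. */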
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}

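/* Flush every entity of every context tracked by the manager within the given timeout. */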
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
{
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i;

	idp = &mgr->ctx_handles;

	mutex_lock(&mgr->lock);
	idr_for_each_entry(idp, ctx, id) {
		for (i = 0; i < num_entities; i++) {
			struct drm_sched_entity *entity;

			entity = &ctx->entities[0][i].entity;
			timeout = drm_sched_entity_flush(entity, timeout);
		}
	}
	mutex_unlock(&mgr->lock);
	return timeout;
}

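/* Tear down the scheduler entities of all contexts still held by the manager. */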
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_read(&ctx->refcount) != 1) {
			DRM_ERROR("ctx %p is still alive\n", ctx);
			continue;
		}

		for (i = 0; i < num_entities; i++)
			drm_sched_entity_fini(&ctx->entities[0][i].entity);
	}
}

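/* Release all remaining contexts, then destroy the manager's IDR and lock. */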
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	amdgpu_ctx_mgr_entity_fini(mgr);

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}

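/*
 * Collect the per-IP scheduler arrays (GFX, compute, SDMA, VCN dec/enc, JPEG)
 * that amdgpu_ctx_init() later hands to drm_sched_entity_init(), skipping
 * harvested VCN/JPEG instances.
 */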
void amdgpu_ctx_init_sched(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched;
		adev->gfx.num_gfx_sched++;
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		adev->gfx.compute_sched[i] = &adev->gfx.compute_ring[i].sched;
		adev->gfx.num_compute_sched++;
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched;
		adev->sdma.num_sdma_sched++;
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		adev->vcn.vcn_dec_sched[adev->vcn.num_vcn_dec_sched++] =
			&adev->vcn.inst[i].ring_dec.sched;
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		for (j = 0; j < adev->vcn.num_enc_rings; ++j)
			adev->vcn.vcn_enc_sched[adev->vcn.num_vcn_enc_sched++] =
				&adev->vcn.inst[i].ring_enc[j].sched;
	}

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;
		adev->jpeg.jpeg_sched[adev->jpeg.num_jpeg_sched++] =
			&adev->jpeg.inst[i].ring_dec.sched;
	}
}