/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"

static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}

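/**
 * Add an entity to the run queue, unless it is already queued.
 *
 * @rq		The run queue
 * @entity	The entity to add
 */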
static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
				    struct amd_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

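/**
 * Remove an entity from the run queue and forget it as the current entity
 * if necessary.
 *
 * @rq		The run queue
 * @entity	The entity to remove
 */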
static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
				       struct amd_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * Select an entity which could provide a job to run
 *
 * @rq		The run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (amd_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {

		if (amd_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * Init a context entity used by the scheduler when submitting to a HW ring.
 *
 * @sched	The pointer to the scheduler
 * @entity	The pointer to a valid amd_sched_entity
 * @rq		The run queue this entity belongs to
 * @jobs	The max number of jobs in the job queue
 *
 * return 0 if succeeded, negative error code on failure
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs)
{
	int r;

	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = rq;
	entity->sched = sched;

	spin_lock_init(&entity->rq_lock);
	spin_lock_init(&entity->queue_lock);
	r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
	if (r)
		return r;

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}

/**
 * Query if entity is initialized
 *
 * @sched       Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * return true if entity is initialized, false otherwise
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
					    struct amd_sched_entity *entity)
{
	return entity->sched == sched &&
		entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
	rmb();
	if (kfifo_is_empty(&entity->job_queue))
		return true;

	return false;
}

/**
 * Check if entity is ready
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if entity could provide a job.
 */
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
{
	if (kfifo_is_empty(&entity->job_queue))
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * Destroy a context entity
 *
 * @sched       Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *entity)
{
	int r;

	if (!amd_sched_entity_is_initialized(sched, entity))
		return;
	/*
	 * The client will not queue more IBs during this fini, consume existing
	 * queued IBs or discard them on SIGKILL
	 */
	if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
		r = -ERESTARTSYS;
	else
		r = wait_event_killable(sched->job_scheduled,
					amd_sched_entity_is_idle(entity));
	amd_sched_entity_set_rq(entity, NULL);
	if (r) {
		struct amd_sched_job *job;

		/* Park the kernel thread for a moment to make sure it isn't
		 * processing our entity.
		 */
		kthread_park(sched->thread);
		kthread_unpark(sched->thread);
		while (kfifo_out(&entity->job_queue, &job, sizeof(job))) {
			struct amd_sched_fence *s_fence = job->s_fence;
			amd_sched_fence_scheduled(s_fence);
			dma_fence_set_error(&s_fence->finished, -ESRCH);
			amd_sched_fence_finished(s_fence);
			dma_fence_put(&s_fence->finished);
			sched->ops->free_job(job);
		}

	}
	kfifo_free(&entity->job_queue);
}

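/* Dependency fence callback: clear the dependency, drop the fence reference
 * and wake up the scheduler.
 */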
static void amd_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	dma_fence_put(f);
	amd_sched_wakeup(entity->sched);
}

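/* Dependency fence callback: clear the dependency and drop the fence
 * reference without waking up the scheduler.
 */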
static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	dma_fence_put(f);
}

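/**
 * Move an entity to a different run queue, or take it out of scheduling
 * entirely when @rq is NULL.
 */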
void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
			     struct amd_sched_rq *rq)
{
	if (entity->rq == rq)
		return;

	spin_lock(&entity->rq_lock);

	if (entity->rq)
		amd_sched_rq_remove_entity(entity->rq, entity);

	entity->rq = rq;
	if (rq)
		amd_sched_rq_add_entity(rq, entity);

	spin_unlock(&entity->rq_lock);
}

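/**
 * Check whether a dependency on @fence could be optimized because the fence
 * comes from the same entity context or from the same scheduler.
 */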
bool amd_sched_dependency_optimized(struct dma_fence *fence,
				    struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct amd_sched_fence *s_fence;

	if (!fence || dma_fence_is_signaled(fence))
		return false;
	if (fence->context == entity->fence_context)
		return true;
	s_fence = to_amd_sched_fence(fence);
	if (s_fence && s_fence->sched == sched)
		return true;

	return false;
}

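/**
 * Install a callback on the entity's current dependency.
 *
 * Returns true if a callback was installed and the caller has to wait for the
 * dependency, false if it could be resolved immediately.
 */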
static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct dma_fence *fence = entity->dependency;
	struct amd_sched_fence *s_fence;

	if (fence->context == entity->fence_context) {
		/* We can ignore fences from ourself */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_amd_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {

		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    amd_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    amd_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

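/**
 * Peek at the next job in the entity's queue without removing it.
 *
 * Returns NULL if the queue is empty or the job still has an unresolved
 * dependency.
 */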
static struct amd_sched_job *
amd_sched_entity_peek_job(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct amd_sched_job *sched_job;

	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
		return NULL;

	while ((entity->dependency = sched->ops->dependency(sched_job)))
		if (amd_sched_entity_add_dependency_cb(entity))
			return NULL;

	return sched_job;
}

/**
 * Helper to submit a job to the job queue
 *
 * @sched_job		The pointer to the job required to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
	struct amd_gpu_scheduler *sched = sched_job->sched;
	struct amd_sched_entity *entity = sched_job->s_entity;
	bool added, first = false;

	spin_lock(&entity->queue_lock);
	added = kfifo_in(&entity->job_queue, &sched_job,
			sizeof(sched_job)) == sizeof(sched_job);

	if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
		first = true;

	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		spin_lock(&entity->rq_lock);
		amd_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
		amd_sched_wakeup(sched);
	}
	return added;
}

/* job_finish is called after the hw fence signaled */
static void amd_sched_job_finish(struct work_struct *work)
{
	struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
						   finish_work);
	struct amd_gpu_scheduler *sched = s_job->sched;

	/* remove job from ring_mirror_list */
	spin_lock(&sched->job_list_lock);
	list_del_init(&s_job->node);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
		struct amd_sched_job *next;

		spin_unlock(&sched->job_list_lock);
		cancel_delayed_work_sync(&s_job->work_tdr);
		spin_lock(&sched->job_list_lock);

		/* queue TDR for next job */
		next = list_first_entry_or_null(&sched->ring_mirror_list,
						struct amd_sched_job, node);

		if (next)
			schedule_delayed_work(&next->work_tdr, sched->timeout);
	}
	spin_unlock(&sched->job_list_lock);
	dma_fence_put(&s_job->s_fence->finished);
	sched->ops->free_job(s_job);
}

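/* Finished fence callback: defer the actual cleanup to the finish_work handler */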
static void amd_sched_job_finish_cb(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
						 finish_cb);
	schedule_work(&job->finish_work);
}

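/* Start tracking a job: add it to the ring mirror list and, if it is the
 * first entry, arm the timeout (TDR) work.
 */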
static void amd_sched_job_begin(struct amd_sched_job *s_job)
{
	struct amd_gpu_scheduler *sched = s_job->sched;

	dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,
			       amd_sched_job_finish_cb);

	spin_lock(&sched->job_list_lock);
	list_add_tail(&s_job->node, &sched->ring_mirror_list);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    list_first_entry_or_null(&sched->ring_mirror_list,
				     struct amd_sched_job, node) == s_job)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
	spin_unlock(&sched->job_list_lock);
}

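/* Timeout (TDR) worker: hand the timed out job to the driver's timedout_job callback */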
static void amd_sched_job_timedout(struct work_struct *work)
{
	struct amd_sched_job *job = container_of(work, struct amd_sched_job,
						 work_tdr.work);

	job->sched->ops->timedout_job(job);
}

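/**
 * Disconnect all jobs on the ring mirror list from their hardware fences,
 * typically before re-submitting them after a GPU reset.
 */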
void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_job *s_job;

	spin_lock(&sched->job_list_lock);
	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
		if (s_job->s_fence->parent &&
		    dma_fence_remove_callback(s_job->s_fence->parent,
					      &s_job->s_fence->cb)) {
			dma_fence_put(s_job->s_fence->parent);
			s_job->s_fence->parent = NULL;
			atomic_dec(&sched->hw_rq_count);
		}
	}
	spin_unlock(&sched->job_list_lock);
}

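/* Remove a job from the ring mirror list, e.g. to drop a guilty job during recovery */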
void amd_sched_job_kickout(struct amd_sched_job *s_job)
{
	struct amd_gpu_scheduler *sched = s_job->sched;

	spin_lock(&sched->job_list_lock);
	list_del_init(&s_job->node);
	spin_unlock(&sched->job_list_lock);
}

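/**
 * Re-submit all jobs on the ring mirror list to the hardware and re-arm the
 * timeout for the first job; the counterpart of amd_sched_hw_job_reset().
 */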
void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_job *s_job, *tmp;
	int r;

	spin_lock(&sched->job_list_lock);
	s_job = list_first_entry_or_null(&sched->ring_mirror_list,
					 struct amd_sched_job, node);
	if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);

	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
		struct amd_sched_fence *s_fence = s_job->s_fence;
		struct dma_fence *fence;

		spin_unlock(&sched->job_list_lock);
		fence = sched->ops->run_job(s_job);
		atomic_inc(&sched->hw_rq_count);
		if (fence) {
			s_fence->parent = dma_fence_get(fence);
			r = dma_fence_add_callback(fence, &s_fence->cb,
						   amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}
		spin_lock(&sched->job_list_lock);
	}
	spin_unlock(&sched->job_list_lock);
}

/**
 * Submit a job to the job queue
 *
 * @sched_job		The pointer to the job required to submit
 *
 * Blocks until the job could be pushed to the entity's queue.
 */
void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;

	trace_amd_sched_job(sched_job);
	wait_event(entity->sched->job_scheduled,
		   amd_sched_entity_in(sched_job));
}

/* init a sched_job with basic fields */
int amd_sched_job_init(struct amd_sched_job *job,
		       struct amd_gpu_scheduler *sched,
		       struct amd_sched_entity *entity,
		       void *owner)
{
	job->sched = sched;
	job->s_entity = entity;
	job->s_priority = entity->rq - sched->sched_rq;
	job->s_fence = amd_sched_fence_create(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;
	job->id = atomic64_inc_return(&sched->job_id_count);

	INIT_WORK(&job->finish_work, amd_sched_job_finish);
	INIT_LIST_HEAD(&job->node);
	INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);

	return 0;
}

/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
	if (amd_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select next entity to process
 */
static struct amd_sched_entity *
amd_sched_select_entity(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_entity *entity;
	int i;

	if (!amd_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = AMD_SCHED_PRIORITY_MAX - 1; i >= AMD_SCHED_PRIORITY_MIN; i--) {
		entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}

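/* Hardware fence callback: mark the scheduler fence as finished, free up a
 * hardware submission slot and wake up the scheduler thread.
 */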
static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct amd_sched_fence *s_fence =
		container_of(cb, struct amd_sched_fence, cb);
	struct amd_gpu_scheduler *sched = s_fence->sched;

	dma_fence_get(&s_fence->finished);
	atomic_dec(&sched->hw_rq_count);
	amd_sched_fence_finished(s_fence);

	trace_amd_sched_process_job(s_fence);
	dma_fence_put(&s_fence->finished);
	wake_up_interruptible(&sched->wake_up_worker);
}

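/* Honour a pending park request for the scheduler thread; returns true if the
 * thread was parked.
 */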
static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
{
	if (kthread_should_park()) {
		kthread_parkme();
		return true;
	}

	return false;
}

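/**
 * The main scheduler thread: pick a ready entity, run its next job on the
 * hardware and install the completion callback.
 */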
static int amd_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
	int r, count;

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct amd_sched_entity *entity = NULL;
		struct amd_sched_fence *s_fence;
		struct amd_sched_job *sched_job;
		struct dma_fence *fence;

		wait_event_interruptible(sched->wake_up_worker,
					 (!amd_sched_blocked(sched) &&
					  (entity = amd_sched_select_entity(sched))) ||
					 kthread_should_stop());

		if (!entity)
			continue;

		sched_job = amd_sched_entity_peek_job(entity);
		if (!sched_job)
			continue;

		s_fence = sched_job->s_fence;

		atomic_inc(&sched->hw_rq_count);
		amd_sched_job_begin(sched_job);

		fence = sched->ops->run_job(sched_job);
		amd_sched_fence_scheduled(s_fence);

		if (fence) {
			s_fence->parent = dma_fence_get(fence);
			r = dma_fence_add_callback(fence, &s_fence->cb,
						   amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}

		count = kfifo_out(&entity->job_queue, &sched_job,
				sizeof(sched_job));
		WARN_ON(count != sizeof(sched_job));
		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * Init a gpu scheduler instance
 *
 * @sched		The pointer to the scheduler
 * @ops			The backend operations for this scheduler.
 * @hw_submission	Max number of hw submissions that can be in flight.
 * @hang_limit		Hang limit, only stored here for use by the driver.
 * @timeout		Job timeout in jiffies before the timedout handler
 *			runs, or MAX_SCHEDULE_TIMEOUT to disable it.
 * @name		Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int amd_sched_init(struct amd_gpu_scheduler *sched,
		   const struct amd_sched_backend_ops *ops,
		   unsigned hw_submission,
		   unsigned hang_limit,
		   long timeout,
		   const char *name)
{
	int i;

	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	sched->hang_limit = hang_limit;
	for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_MAX; i++)
		amd_sched_rq_init(&sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->ring_mirror_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->hw_rq_count, 0);
	atomic64_set(&sched->job_id_count, 0);

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return PTR_ERR(sched->thread);
	}

	return 0;
}

/**
 * Destroy a gpu scheduler
 *
 * @sched	The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
	if (sched->thread)
		kthread_stop(sched->thread);
}