/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#include "spsc_queue.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"

#define to_amd_sched_job(sched_job)		\
		container_of((sched_job), struct amd_sched_job, queue_node)

static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}

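/**
 * Add an entity to the run queue, unless it is already queued
 *
 * @rq		The run queue to add to.
 * @entity	The entity to add.
 */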
static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
				    struct amd_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

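/**
 * Remove an entity from the run queue, if it is currently queued
 *
 * @rq		The run queue to remove from.
 * @entity	The entity to remove.
 *
 * Also clears rq->current_entity when it points at @entity.
 */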
static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
				       struct amd_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * Select an entity which could provide a job to run
 *
 * @rq		The run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (amd_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {

		if (amd_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * Init a context entity used by the scheduler when submitting to a HW ring.
 *
 * @sched	The pointer to the scheduler
 * @entity	The pointer to a valid amd_sched_entity
 * @rq		The run queue this entity belongs to
 * @jobs	The max number of jobs in the job queue
 * @guilty	Atomic flag that is set once this entity is blamed for a hang
 *
 * Return 0 on success, negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs, atomic_t *guilty)
{
	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = rq;
	entity->sched = sched;
	entity->guilty = guilty;

	spin_lock_init(&entity->rq_lock);
	spin_lock_init(&entity->queue_lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}

/**
 * Query if entity is initialized
 *
 * @sched       Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * return true if entity is initialized, false otherwise
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
					    struct amd_sched_entity *entity)
{
	return entity->sched == sched &&
		entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
	rmb();
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return true;

	return false;
}

/**
 * Check if entity is ready
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if entity could provide a job.
 */
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * Destroy a context entity
 *
 * @sched       Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *entity)
{
	int r;

	if (!amd_sched_entity_is_initialized(sched, entity))
		return;
	/* The client will not queue more IBs during this fini, consume existing
	 * queued IBs or discard them on SIGKILL
	 */
	if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
		r = -ERESTARTSYS;
	else
		r = wait_event_killable(sched->job_scheduled,
					amd_sched_entity_is_idle(entity));
	amd_sched_entity_set_rq(entity, NULL);
	if (r) {
		struct amd_sched_job *job;

		/* Park the scheduler thread for a moment to make sure it isn't
		 * processing our entity.
		 */
		kthread_park(sched->thread);
		kthread_unpark(sched->thread);
		while ((job = to_amd_sched_job(spsc_queue_pop(&entity->job_queue)))) {
			struct amd_sched_fence *s_fence = job->s_fence;
			amd_sched_fence_scheduled(s_fence);
			dma_fence_set_error(&s_fence->finished, -ESRCH);
			amd_sched_fence_finished(s_fence);
			dma_fence_put(&s_fence->finished);
			sched->ops->free_job(job);
		}
	}
}

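/* Dependency fence callback: clear the entity's dependency and wake up the
 * scheduler so the entity can be considered for selection again.
 */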
static void amd_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	dma_fence_put(f);
	amd_sched_wakeup(entity->sched);
}

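/* Scheduled fence callback: clear the entity's dependency without waking up
 * the scheduler; used for dependencies from the same scheduler (see
 * amd_sched_entity_add_dependency_cb() below).
 */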
static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	dma_fence_put(f);
}

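/**
 * Move an entity to a different run queue, or detach it with a NULL @rq
 *
 * @entity	The entity to move.
 * @rq		The run queue to move the entity to, or NULL.
 */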
void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
			     struct amd_sched_rq *rq)
{
	if (entity->rq == rq)
		return;

	spin_lock(&entity->rq_lock);

	if (entity->rq)
		amd_sched_rq_remove_entity(entity->rq, entity);

	entity->rq = rq;
	if (rq)
		amd_sched_rq_add_entity(rq, entity);

	spin_unlock(&entity->rq_lock);
}

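/**
 * Check whether waiting on @fence could be optimized for @entity
 *
 * Returns true if @fence is not yet signaled and either belongs to the
 * entity's own fence context or was produced by the same scheduler the
 * entity runs on.
 */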
bool amd_sched_dependency_optimized(struct dma_fence *fence,
				    struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct amd_sched_fence *s_fence;

	if (!fence || dma_fence_is_signaled(fence))
		return false;
	if (fence->context == entity->fence_context)
		return true;
	s_fence = to_amd_sched_fence(fence);
	if (s_fence && s_fence->sched == sched)
		return true;

	return false;
}

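/* Install a callback on the entity's current dependency fence.  Returns true
 * if a callback was installed and the job must wait, false if the dependency
 * can be ignored or has already signaled.
 */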
static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct dma_fence *fence = entity->dependency;
	struct amd_sched_fence *s_fence;

	if (fence->context == entity->fence_context) {
		/* We can ignore fences from ourself */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_amd_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {

		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    amd_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    amd_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

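/* Pop the next job from the entity's queue once all of its dependencies are
 * resolved.  Jobs from an entity that has been marked guilty get their
 * finished fence flagged with -ECANCELED before being returned.
 */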
static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct amd_sched_job *sched_job = to_amd_sched_job(
						spsc_queue_peek(&entity->job_queue));

	if (!sched_job)
		return NULL;

	while ((entity->dependency = sched->ops->dependency(sched_job, entity)))
		if (amd_sched_entity_add_dependency_cb(entity))
			return NULL;

	/* skip jobs from an entity that has been marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	spsc_queue_pop(&entity->job_queue);
	return sched_job;
}

/**
 * Submit a job to the job queue
 *
 * @sched_job		The pointer to job required to submit
 * @entity		The entity the job is submitted on
 *
 * The first job pushed to an idle entity adds the entity to its run queue
 * and wakes up the scheduler.
 */
void amd_sched_entity_push_job(struct amd_sched_job *sched_job,
			       struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = sched_job->sched;
	bool first = false;

	trace_amd_sched_job(sched_job, entity);

	spin_lock(&entity->queue_lock);
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		spin_lock(&entity->rq_lock);
		amd_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
		amd_sched_wakeup(sched);
	}
}

/* job_finish is called after the hw fence is signaled */
static void amd_sched_job_finish(struct work_struct *work)
{
	struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
						   finish_work);
	struct amd_gpu_scheduler *sched = s_job->sched;

	/* remove job from ring_mirror_list */
	spin_lock(&sched->job_list_lock);
	list_del_init(&s_job->node);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
		struct amd_sched_job *next;

		spin_unlock(&sched->job_list_lock);
		cancel_delayed_work_sync(&s_job->work_tdr);
		spin_lock(&sched->job_list_lock);

		/* queue TDR for next job */
		next = list_first_entry_or_null(&sched->ring_mirror_list,
						struct amd_sched_job, node);

		if (next)
			schedule_delayed_work(&next->work_tdr, sched->timeout);
	}
	spin_unlock(&sched->job_list_lock);
	dma_fence_put(&s_job->s_fence->finished);
	sched->ops->free_job(s_job);
}

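/* Finished fence callback: defer the cleanup in amd_sched_job_finish() to
 * process context via the job's finish_work.
 */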
static void amd_sched_job_finish_cb(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
						 finish_cb);
	schedule_work(&job->finish_work);
}

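/* Track a job that is handed to the hardware: hook up the finished-fence
 * callback, add the job to the ring mirror list and, for the first job in
 * the list, arm the timeout (TDR) handler.
 */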
static void amd_sched_job_begin(struct amd_sched_job *s_job)
{
	struct amd_gpu_scheduler *sched = s_job->sched;

	dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,
			       amd_sched_job_finish_cb);

	spin_lock(&sched->job_list_lock);
	list_add_tail(&s_job->node, &sched->ring_mirror_list);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    list_first_entry_or_null(&sched->ring_mirror_list,
				     struct amd_sched_job, node) == s_job)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
	spin_unlock(&sched->job_list_lock);
}

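/* Delayed work handler for the job timeout (TDR): forward to the backend's
 * timedout_job callback.
 */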
static void amd_sched_job_timedout(struct work_struct *work)
{
	struct amd_sched_job *job = container_of(work, struct amd_sched_job,
						 work_tdr.work);

	job->sched->ops->timedout_job(job);
}

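/**
 * Stop tracking jobs on the hardware after a hang
 *
 * @sched	The scheduler that detected the hang.
 * @bad		The job suspected of causing the hang, or NULL.
 *
 * Removes the parent-fence callbacks of all jobs on the ring mirror list and
 * increases @bad's karma; once the karma exceeds the hang limit, the owning
 * entity is marked guilty.  Kernel-priority jobs are never blamed.
 */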
void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched, struct amd_sched_job *bad)
{
	struct amd_sched_job *s_job;
	struct amd_sched_entity *entity, *tmp;
	int i;

	spin_lock(&sched->job_list_lock);
	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
		if (s_job->s_fence->parent &&
		    dma_fence_remove_callback(s_job->s_fence->parent,
					      &s_job->s_fence->cb)) {
			dma_fence_put(s_job->s_fence->parent);
			s_job->s_fence->parent = NULL;
			atomic_dec(&sched->hw_rq_count);
		}
	}
	spin_unlock(&sched->job_list_lock);

	if (bad && bad->s_priority != AMD_SCHED_PRIORITY_KERNEL) {
		atomic_inc(&bad->karma);
		/* don't increase @bad's karma if it's from the KERNEL RQ,
		 * because sometimes a GPU hang corrupts kernel jobs (like VM
		 * updating jobs), but kernel jobs are always considered good.
		 */
		for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_KERNEL; i++) {
			struct amd_sched_rq *rq = &sched->sched_rq[i];

			spin_lock(&rq->lock);
			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
				if (bad->s_fence->scheduled.context == entity->fence_context) {
					if (atomic_read(&bad->karma) > bad->sched->hang_limit)
						if (entity->guilty)
							atomic_set(entity->guilty, 1);
					break;
				}
			}
			spin_unlock(&rq->lock);
			if (&entity->list != &rq->entities)
				break;
		}
	}
}

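/* Remove a single job from the scheduler's ring mirror list so that it is
 * not resubmitted by amd_sched_job_recovery().
 */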
void amd_sched_job_kickout(struct amd_sched_job *s_job)
{
	struct amd_gpu_scheduler *sched = s_job->sched;

	spin_lock(&sched->job_list_lock);
	list_del_init(&s_job->node);
	spin_unlock(&sched->job_list_lock);
}

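/**
 * Resubmit the jobs on the ring mirror list after a hardware reset
 *
 * @sched	The scheduler to recover.
 *
 * Jobs belonging to a context found guilty (karma above the hang limit) get
 * their finished fence marked with -ECANCELED before being handed back to
 * the backend's run_job callback.
 */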
void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_job *s_job, *tmp;
	bool found_guilty = false;
	int r;

	spin_lock(&sched->job_list_lock);
	s_job = list_first_entry_or_null(&sched->ring_mirror_list,
					 struct amd_sched_job, node);
	if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);

	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
		struct amd_sched_fence *s_fence = s_job->s_fence;
		struct dma_fence *fence;
		uint64_t guilty_context;

		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
			found_guilty = true;
			guilty_context = s_job->s_fence->scheduled.context;
		}

		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
			dma_fence_set_error(&s_fence->finished, -ECANCELED);

		spin_unlock(&sched->job_list_lock);
		fence = sched->ops->run_job(s_job);
		atomic_inc(&sched->hw_rq_count);
		if (fence) {
			s_fence->parent = dma_fence_get(fence);
			r = dma_fence_add_callback(fence, &s_fence->cb,
						   amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else {
			amd_sched_process_job(NULL, &s_fence->cb);
		}
		spin_lock(&sched->job_list_lock);
	}
	spin_unlock(&sched->job_list_lock);
}

/* init a sched_job with basic fields */
int amd_sched_job_init(struct amd_sched_job *job,
		       struct amd_gpu_scheduler *sched,
		       struct amd_sched_entity *entity,
		       void *owner)
{
	job->sched = sched;
	job->s_priority = entity->rq - sched->sched_rq;
	job->s_fence = amd_sched_fence_create(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;
	job->id = atomic64_inc_return(&sched->job_id_count);

	INIT_WORK(&job->finish_work, amd_sched_job_finish);
	INIT_LIST_HEAD(&job->node);
	INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);

	return 0;
}

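/*
 * Illustrative submission flow (a sketch, not code from this file): a driver
 * backend would typically embed amd_sched_job in its own job structure,
 * initialize it and then push it to an entity, e.g.
 *
 *	r = amd_sched_job_init(&job->base, &ring->sched, &ctx->entity, owner);
 *	if (r)
 *		return r;
 *	amd_sched_entity_push_job(&job->base, &ctx->entity);
 *
 * where "ring", "ctx" and "job" are hypothetical driver-side structures.
 */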
/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
	if (amd_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select next entity to process
 */
static struct amd_sched_entity *
amd_sched_select_entity(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_entity *entity;
	int i;

	if (!amd_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = AMD_SCHED_PRIORITY_MAX - 1; i >= AMD_SCHED_PRIORITY_MIN; i--) {
		entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}

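/* Hardware fence callback: the job's hardware fence signaled, so release the
 * hardware slot, signal the finished fence and wake up the scheduler thread.
 */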
static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct amd_sched_fence *s_fence =
		container_of(cb, struct amd_sched_fence, cb);
	struct amd_gpu_scheduler *sched = s_fence->sched;

	dma_fence_get(&s_fence->finished);
	atomic_dec(&sched->hw_rq_count);
	amd_sched_fence_finished(s_fence);

	trace_amd_sched_process_job(s_fence);
	dma_fence_put(&s_fence->finished);
	wake_up_interruptible(&sched->wake_up_worker);
}

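/* Park the scheduler thread if somebody requested it (for example
 * amd_sched_entity_fini()); returns true if the thread was parked.
 */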
static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
{
	if (kthread_should_park()) {
		kthread_parkme();
		return true;
	}

	return false;
}

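/* Main scheduler thread: picks the next ready entity, pops a job from it,
 * hands the job to the backend's run_job callback and installs
 * amd_sched_process_job() as callback on the returned hardware fence.
 */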
static int amd_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
	int r;

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct amd_sched_entity *entity = NULL;
		struct amd_sched_fence *s_fence;
		struct amd_sched_job *sched_job;
		struct dma_fence *fence;

		wait_event_interruptible(sched->wake_up_worker,
					 (!amd_sched_blocked(sched) &&
					  (entity = amd_sched_select_entity(sched))) ||
					 kthread_should_stop());

		if (!entity)
			continue;

		sched_job = amd_sched_entity_pop_job(entity);
		if (!sched_job)
			continue;

		s_fence = sched_job->s_fence;

		atomic_inc(&sched->hw_rq_count);
		amd_sched_job_begin(sched_job);

		fence = sched->ops->run_job(sched_job);
		amd_sched_fence_scheduled(s_fence);

		if (fence) {
			s_fence->parent = dma_fence_get(fence);
			r = dma_fence_add_callback(fence, &s_fence->cb,
						   amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else {
			amd_sched_process_job(NULL, &s_fence->cb);
		}

		wake_up(&sched->job_scheduled);
	}
	return 0;
}

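/*
 * Note: this file drives the backend through the amd_sched_backend_ops
 * callbacks ->dependency(), ->run_job(), ->timedout_job() and ->free_job();
 * see their call sites above for the exact contract.
 */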
/**
 * Init a gpu scheduler instance
 *
 * @sched		The pointer to the scheduler
 * @ops			The backend operations for this scheduler.
 * @hw_submission	Number of hw submissions that can be in flight.
 * @hang_limit		Number of hangs a job may cause before its entity is
 *			marked guilty.
 * @timeout		Job timeout in jiffies, or MAX_SCHEDULE_TIMEOUT to
 *			disable the timeout handler.
 * @name		Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int amd_sched_init(struct amd_gpu_scheduler *sched,
		   const struct amd_sched_backend_ops *ops,
		   unsigned hw_submission,
		   unsigned hang_limit,
		   long timeout,
		   const char *name)
{
	int i;
	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	sched->hang_limit = hang_limit;
	for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_MAX; i++)
		amd_sched_rq_init(&sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->ring_mirror_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->hw_rq_count, 0);
	atomic64_set(&sched->job_id_count, 0);

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return PTR_ERR(sched->thread);
	}

	return 0;
}

/**
 * Destroy a gpu scheduler
 *
 * @sched	The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
	if (sched->thread)
		kthread_stop(sched->thread);
}