/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"

static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}

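/**
 * Add an entity to the run queue
 *
 * @rq		The run queue to add to
 * @entity	The entity to add; nothing is done if it is already queued
 */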
static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
				    struct amd_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

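/**
 * Remove an entity from the run queue
 *
 * @rq		The run queue to remove from
 * @entity	The entity to remove; rq->current_entity is cleared if it
 *		points at this entity
 */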
static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
				       struct amd_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * Select an entity which could provide a job to run
 *
 * @rq		The run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (amd_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {

		if (amd_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * Init a context entity used by the scheduler when submitting to the HW ring.
 *
 * @sched	The pointer to the scheduler
 * @entity	The pointer to a valid amd_sched_entity
 * @rq		The run queue this entity belongs to
 * @jobs	The max number of jobs in the job queue
 *
 * Return 0 if succeeded, negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs)
{
	int r;

	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = rq;
	entity->sched = sched;

	spin_lock_init(&entity->queue_lock);
	r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
	if (r)
		return r;

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}

/**
 * Query if entity is initialized
 *
 * @sched       Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if the entity is initialized, false otherwise.
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
					    struct amd_sched_entity *entity)
{
	return entity->sched == sched &&
		entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
	rmb();
	if (kfifo_is_empty(&entity->job_queue))
		return true;

	return false;
}

/**
 * Check if entity is ready
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if entity could provide a job.
 */
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
{
	if (kfifo_is_empty(&entity->job_queue))
		return false;

	if (ACCESS_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * Destroy a context entity
 *
 * @sched       Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *entity)
{
	struct amd_sched_rq *rq = entity->rq;

	if (!amd_sched_entity_is_initialized(sched, entity))
		return;

	/*
	 * The client will not queue more IBs during this fini; consume the
	 * existing queued IBs.
	 */
	wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));

	amd_sched_rq_remove_entity(rq, entity);
	kfifo_free(&entity->job_queue);
}

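/* Dependency fence callback: clear the dependency and wake up the scheduler */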
static void amd_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	dma_fence_put(f);
	amd_sched_wakeup(entity->sched);
}

static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	dma_fence_put(f);
}

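/**
 * Install a callback on the entity's current dependency fence
 *
 * Returns true if a callback was installed and the caller has to wait for
 * the dependency, false if the dependency could be resolved immediately.
 */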
static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct dma_fence *fence = entity->dependency;
	struct amd_sched_fence *s_fence;

	if (fence->context == entity->fence_context) {
		/* We can ignore fences from ourselves */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_amd_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {

		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    amd_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    amd_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

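/**
 * Peek at the next job in the entity's queue
 *
 * Returns NULL if the queue is empty or the job still has an unresolved
 * dependency. The job is not removed from the kfifo here; that happens in
 * the main scheduler loop once the job has been pushed to the hardware.
 */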
static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct amd_sched_job *sched_job;

	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
		return NULL;

	while ((entity->dependency = sched->ops->dependency(sched_job)))
		if (amd_sched_entity_add_dependency_cb(entity))
			return NULL;

	return sched_job;
}

/**
 * Helper to submit a job to the job queue
 *
 * @sched_job		The pointer to the job to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
	struct amd_gpu_scheduler *sched = sched_job->sched;
	struct amd_sched_entity *entity = sched_job->s_entity;
	bool added, first = false;

	spin_lock(&entity->queue_lock);
	added = kfifo_in(&entity->job_queue, &sched_job,
			sizeof(sched_job)) == sizeof(sched_job);

	if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
		first = true;

	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		amd_sched_rq_add_entity(entity->rq, entity);
		amd_sched_wakeup(sched);
	}
	return added;
}

/* job_finish is called after the hw fence is signaled; it removes the job
 * from the ring_mirror_list and hands it to ops->free_job()
 */
static void amd_sched_job_finish(struct work_struct *work)
{
	struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
						   finish_work);
	struct amd_gpu_scheduler *sched = s_job->sched;

	/* remove job from ring_mirror_list */
	spin_lock(&sched->job_list_lock);
	list_del_init(&s_job->node);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
		struct amd_sched_job *next;

		spin_unlock(&sched->job_list_lock);
		cancel_delayed_work_sync(&s_job->work_tdr);
		spin_lock(&sched->job_list_lock);

		/* queue TDR for next job */
		next = list_first_entry_or_null(&sched->ring_mirror_list,
						struct amd_sched_job, node);

		if (next)
			schedule_delayed_work(&next->work_tdr, sched->timeout);
	}
	spin_unlock(&sched->job_list_lock);
	sched->ops->free_job(s_job);
}

static void amd_sched_job_finish_cb(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
						 finish_cb);
	schedule_work(&job->finish_work);
}

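/* Start tracking a job that is being pushed to the hardware: add it to the
 * ring_mirror_list and arm the timeout handler for the first job in the list.
 */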
static void amd_sched_job_begin(struct amd_sched_job *s_job)
{
	struct amd_gpu_scheduler *sched = s_job->sched;

	spin_lock(&sched->job_list_lock);
	list_add_tail(&s_job->node, &sched->ring_mirror_list);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    list_first_entry_or_null(&sched->ring_mirror_list,
				     struct amd_sched_job, node) == s_job)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
	spin_unlock(&sched->job_list_lock);
}

static void amd_sched_job_timedout(struct work_struct *work)
{
	struct amd_sched_job *job = container_of(work, struct amd_sched_job,
						 work_tdr.work);

	job->sched->ops->timedout_job(job);
}

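/* Detach the hardware fence callback from every job on the ring_mirror_list
 * and drop the parent fence reference, e.g. before the hardware is reset.
 */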
void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_job *s_job;

	spin_lock(&sched->job_list_lock);
	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
		if (dma_fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) {
			dma_fence_put(s_job->s_fence->parent);
			s_job->s_fence->parent = NULL;
		}
	}
	atomic_set(&sched->hw_rq_count, 0);
	spin_unlock(&sched->job_list_lock);
}

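/* Resubmit all jobs on the ring_mirror_list, e.g. after a hardware reset:
 * re-arm the timeout handler and run every job through ops->run_job() again.
 */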
void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_job *s_job, *tmp;
	int r;

	spin_lock(&sched->job_list_lock);
	s_job = list_first_entry_or_null(&sched->ring_mirror_list,
					 struct amd_sched_job, node);
	if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);

	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
		struct amd_sched_fence *s_fence = s_job->s_fence;
		struct dma_fence *fence;

		spin_unlock(&sched->job_list_lock);
		fence = sched->ops->run_job(s_job);
		atomic_inc(&sched->hw_rq_count);
		if (fence) {
			s_fence->parent = dma_fence_get(fence);
			r = dma_fence_add_callback(fence, &s_fence->cb,
						   amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}
		spin_lock(&sched->job_list_lock);
	}
	spin_unlock(&sched->job_list_lock);
}

/**
 * Submit a job to the job queue
 *
 * @sched_job		The pointer to the job to submit
 *
 * Blocks until the job could be pushed into the entity's job queue.
 */
void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;

	trace_amd_sched_job(sched_job);
	dma_fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
			       amd_sched_job_finish_cb);
	wait_event(entity->sched->job_scheduled,
		   amd_sched_entity_in(sched_job));
}

/* init a sched_job with basic fields */
int amd_sched_job_init(struct amd_sched_job *job,
		       struct amd_gpu_scheduler *sched,
		       struct amd_sched_entity *entity,
		       void *owner)
{
	job->sched = sched;
	job->s_entity = entity;
	job->s_fence = amd_sched_fence_create(entity, owner);
	job->id = atomic64_inc_return(&sched->job_id_count);
	if (!job->s_fence)
		return -ENOMEM;

	INIT_WORK(&job->finish_work, amd_sched_job_finish);
	INIT_LIST_HEAD(&job->node);
	INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);

	return 0;
}
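
/*
 * Typical submission flow (illustrative sketch only; "ring", "ctx" and "job"
 * are hypothetical driver-side names, not part of this file):
 *
 *	r = amd_sched_job_init(&job->base, &ring->sched, &ctx->entity, ctx);
 *	if (r)
 *		return r;
 *	amd_sched_entity_push_job(&job->base);
 *
 * The scheduler thread later pops the job via amd_sched_entity_pop_job()
 * and hands it to ops->run_job().
 */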

/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
	if (amd_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select next entity to process
 */
static struct amd_sched_entity *
amd_sched_select_entity(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_entity *entity;
	int i;

	if (!amd_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = AMD_SCHED_PRIORITY_MAX - 1; i >= AMD_SCHED_PRIORITY_MIN; i--) {
		entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}

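/* Hardware fence callback: the job is done on the hardware, so drop it from
 * the in-flight count, signal the finished fence and wake up the scheduler.
 */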
static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct amd_sched_fence *s_fence =
		container_of(cb, struct amd_sched_fence, cb);
	struct amd_gpu_scheduler *sched = s_fence->sched;

	atomic_dec(&sched->hw_rq_count);
	amd_sched_fence_finished(s_fence);

	trace_amd_sched_process_job(s_fence);
	dma_fence_put(&s_fence->finished);
	wake_up_interruptible(&sched->wake_up_worker);
}

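/* Park the scheduler thread if somebody requested it via kthread_park() */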
static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
{
	if (kthread_should_park()) {
		kthread_parkme();
		return true;
	}

	return false;
}

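/**
 * The main scheduler thread
 *
 * Waits until a run queue has a ready entity, pops one job from it, hands
 * the job to ops->run_job() and installs amd_sched_process_job() as the
 * hardware fence callback.
 */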
static int amd_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
	int r, count;

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct amd_sched_entity *entity = NULL;
		struct amd_sched_fence *s_fence;
		struct amd_sched_job *sched_job;
		struct dma_fence *fence;

		wait_event_interruptible(sched->wake_up_worker,
					 (!amd_sched_blocked(sched) &&
					  (entity = amd_sched_select_entity(sched))) ||
					 kthread_should_stop());

		if (!entity)
			continue;

		sched_job = amd_sched_entity_pop_job(entity);
		if (!sched_job)
			continue;

		s_fence = sched_job->s_fence;

		atomic_inc(&sched->hw_rq_count);
		amd_sched_job_begin(sched_job);

		fence = sched->ops->run_job(sched_job);
		amd_sched_fence_scheduled(s_fence);
		if (fence) {
			s_fence->parent = dma_fence_get(fence);
			r = dma_fence_add_callback(fence, &s_fence->cb,
						   amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}

		count = kfifo_out(&entity->job_queue, &sched_job,
				sizeof(sched_job));
		WARN_ON(count != sizeof(sched_job));
		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * Init a gpu scheduler instance
 *
 * @sched		The pointer to the scheduler
 * @ops			The backend operations for this scheduler.
 * @hw_submission	Number of hw submissions to do.
 * @timeout		Job timeout in jiffies, or MAX_SCHEDULE_TIMEOUT to
 *			disable the timeout handling.
 * @name		Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int amd_sched_init(struct amd_gpu_scheduler *sched,
		   const struct amd_sched_backend_ops *ops,
		   unsigned hw_submission, long timeout, const char *name)
{
	int i;
	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_MAX; i++)
		amd_sched_rq_init(&sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->ring_mirror_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->hw_rq_count, 0);
	atomic64_set(&sched->job_id_count, 0);

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return PTR_ERR(sched->thread);
	}

	return 0;
}
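
/*
 * Example (sketch only; "ring", "amdgpu_sched_ops" and "amdgpu_sched_jobs"
 * stand in for driver-side names):
 *
 *	r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
 *			   amdgpu_sched_jobs, msecs_to_jiffies(timeout_ms),
 *			   ring->name);
 *	if (r)
 *		return r;
 */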

/**
 * Destroy a gpu scheduler
 *
 * @sched	The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
	if (sched->thread)
		kthread_stop(sched->thread);
}