/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"

static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);

struct kmem_cache *sched_fence_slab;
atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}

static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
				    struct amd_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
				       struct amd_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * Select an entity which could provide a job to run
 *
 * @rq		The run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (amd_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {

		if (amd_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * Init a context entity used by the scheduler when submitting to the HW ring.
 *
 * @sched	The pointer to the scheduler
 * @entity	The pointer to a valid amd_sched_entity
 * @rq		The run queue this entity belongs to
 * @jobs	The max number of jobs in the job queue
 *
 * Return 0 on success, negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs)
{
	int r;

	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = rq;
	entity->sched = sched;

	spin_lock_init(&entity->queue_lock);
	r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
	if (r)
		return r;

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = fence_context_alloc(1);

	return 0;
}
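
/*
 * Illustrative usage (a sketch, not part of this file): a driver would
 * typically embed an amd_sched_entity in its per-context state and
 * initialize it against one of the scheduler's run queues.  "ring", "ctx",
 * "prio" and the queue depth of 32 are assumptions made for the example
 * only; prio must be below AMD_SCHED_MAX_PRIORITY.
 *
 *	struct amd_gpu_scheduler *sched = &ring->sched;
 *	struct amd_sched_rq *rq = &sched->sched_rq[prio];
 *	int r;
 *
 *	r = amd_sched_entity_init(sched, &ctx->entity, rq, 32);
 *	if (r)
 *		return r;
 */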

/**
 * Query if entity is initialized
 *
 * @sched       Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * return true if entity is initialized, false otherwise
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
					    struct amd_sched_entity *entity)
{
	return entity->sched == sched &&
		entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
	rmb();
	if (kfifo_is_empty(&entity->job_queue))
		return true;

	return false;
}

/**
 * Check if entity is ready
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if entity could provide a job.
 */
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
{
	if (kfifo_is_empty(&entity->job_queue))
		return false;

	if (ACCESS_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * Destroy a context entity
 *
 * @sched       Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *entity)
{
	struct amd_sched_rq *rq = entity->rq;

	if (!amd_sched_entity_is_initialized(sched, entity))
		return;

	/*
	 * The client will not queue more IBs during this fini; consume the
	 * existing queued IBs.
	 */
	wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));

	amd_sched_rq_remove_entity(rq, entity);
	kfifo_free(&entity->job_queue);
}

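/**
 * Fence callback: clear the entity's dependency and wake up the scheduler
 * so it can consider this entity again.
 */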
static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	fence_put(f);
	amd_sched_wakeup(entity->sched);
}

static void amd_sched_entity_clear_dep(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	fence_put(f);
}

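/**
 * Install a callback on the entity's current dependency fence.
 *
 * Fences from the entity's own context and fences from the same scheduler
 * that are already scheduled are dropped right away.  Returns true if a
 * callback was added and the entity has to wait, false if the dependency
 * could be discarded immediately.
 */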
static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct fence *fence = entity->dependency;
	struct amd_sched_fence *s_fence;

	if (fence->context == entity->fence_context) {
		/* We can ignore fences from ourselves */
		fence_put(entity->dependency);
		return false;
	}

	s_fence = to_amd_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {
		/* Fence is from the same scheduler */
		if (test_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &fence->flags)) {
			/* Ignore it when it is already scheduled */
			fence_put(entity->dependency);
			return false;
		}

		/* Wait for fence to be scheduled */
		entity->cb.func = amd_sched_entity_clear_dep;
		list_add_tail(&entity->cb.node, &s_fence->scheduled_cb);
		return true;
	}

	if (!fence_add_callback(entity->dependency, &entity->cb,
				amd_sched_entity_wakeup))
		return true;

	fence_put(entity->dependency);
	return false;
}

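/**
 * Peek at the next job in the entity's queue.
 *
 * Returns NULL if the queue is empty or the job still has an unsatisfied
 * dependency; in the latter case a callback is installed so the scheduler
 * is woken up once the dependency fence signals.
 */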
static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct amd_sched_job *sched_job;

	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
		return NULL;

	while ((entity->dependency = sched->ops->dependency(sched_job)))
		if (amd_sched_entity_add_dependency_cb(entity))
			return NULL;

	return sched_job;
}

/**
 * Helper to submit a job to the job queue
 *
 * @sched_job		The pointer to the job to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
	struct amd_gpu_scheduler *sched = sched_job->sched;
	struct amd_sched_entity *entity = sched_job->s_entity;
	bool added, first = false;

	spin_lock(&entity->queue_lock);
	added = kfifo_in(&entity->job_queue, &sched_job,
			sizeof(sched_job)) == sizeof(sched_job);

	if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
		first = true;

	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		amd_sched_rq_add_entity(entity->rq, entity);
		amd_sched_wakeup(sched);
	}
	return added;
}

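/**
 * Fence callback that defers freeing of a finished job to a work item.
 */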
static void amd_sched_free_job(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_job *job = container_of(cb, struct amd_sched_job, cb_free_job);
	schedule_work(&job->work_free_job);
}

/* job_finish is called after the hw fence has signaled, and
 * the job has already been deleted from the ring_mirror_list
 */
void amd_sched_job_finish(struct amd_sched_job *s_job)
{
	struct amd_sched_job *next;
	struct amd_gpu_scheduler *sched = s_job->sched;

	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
		if (cancel_delayed_work(&s_job->work_tdr))
			amd_sched_job_put(s_job);

		/* queue TDR for next job */
		next = list_first_entry_or_null(&sched->ring_mirror_list,
						struct amd_sched_job, node);

		if (next) {
			INIT_DELAYED_WORK(&next->work_tdr, s_job->timeout_callback);
			amd_sched_job_get(next);
			schedule_delayed_work(&next->work_tdr, sched->timeout);
		}
	}
}

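/**
 * Start the timeout handler for @s_job if it is the first job on the ring
 * mirror list and this scheduler has a timeout configured.
 */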
void amd_sched_job_begin(struct amd_sched_job *s_job)
{
	struct amd_gpu_scheduler *sched = s_job->sched;

	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    list_first_entry_or_null(&sched->ring_mirror_list,
				     struct amd_sched_job, node) == s_job) {
		INIT_DELAYED_WORK(&s_job->work_tdr, s_job->timeout_callback);
		amd_sched_job_get(s_job);
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
	}
}

/**
 * Submit a job to the job queue
 *
 * @sched_job		The pointer to the job to submit
 *
 * Blocks until the job could be added to the entity's job queue.
 */
void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;

	sched_job->use_sched = 1;
	fence_add_callback(&sched_job->s_fence->base,
			   &sched_job->cb_free_job, amd_sched_free_job);
	trace_amd_sched_job(sched_job);
	wait_event(entity->sched->job_scheduled,
		   amd_sched_entity_in(sched_job));
}

/* init a sched_job with basic fields */
int amd_sched_job_init(struct amd_sched_job *job,
		       struct amd_gpu_scheduler *sched,
		       struct amd_sched_entity *entity,
		       void (*timeout_cb)(struct work_struct *work),
		       void (*free_cb)(struct kref *refcount),
		       void *owner, struct fence **fence)
{
	INIT_LIST_HEAD(&job->node);
	kref_init(&job->refcount);
	job->sched = sched;
	job->s_entity = entity;
	job->s_fence = amd_sched_fence_create(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;

	job->s_fence->s_job = job;
	job->timeout_callback = timeout_cb;
	job->free_callback = free_cb;

	if (fence)
		*fence = &job->s_fence->base;
	return 0;
}
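
/*
 * Illustrative flow (a sketch, not part of this file): a driver initializes
 * a job with amd_sched_job_init() and then hands it to the entity with
 * amd_sched_entity_push_job().  "my_job" (embedding an amd_sched_job as
 * "base"), "my_timeout_cb", "my_free_cb" and "owner" are hypothetical
 * driver-side names.
 *
 *	struct fence *fence;
 *	int r;
 *
 *	r = amd_sched_job_init(&my_job->base, sched, &ctx->entity,
 *			       my_timeout_cb, my_free_cb, owner, &fence);
 *	if (r)
 *		return r;
 *	amd_sched_entity_push_job(&my_job->base);
 */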

/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
	if (amd_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select next entity to process
 */
static struct amd_sched_entity *
amd_sched_select_entity(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_entity *entity;
	int i;

	if (!amd_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++) {
		entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}

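/**
 * Called when the hardware fence of a job signals: drop the job from the
 * ring mirror list, signal the scheduler fence and wake up the worker.
 */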
static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_fence *s_fence =
		container_of(cb, struct amd_sched_fence, cb);
	struct amd_gpu_scheduler *sched = s_fence->sched;
	unsigned long flags;

	atomic_dec(&sched->hw_rq_count);

	/* remove job from ring_mirror_list */
	spin_lock_irqsave(&sched->job_list_lock, flags);
	list_del_init(&s_fence->s_job->node);
	sched->ops->finish_job(s_fence->s_job);
	spin_unlock_irqrestore(&sched->job_list_lock, flags);

	amd_sched_fence_signal(s_fence);

	trace_amd_sched_process_job(s_fence);
	fence_put(&s_fence->base);
	wake_up_interruptible(&sched->wake_up_worker);
}

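/**
 * Main scheduler thread: pick a ready entity, pop its next job, hand it to
 * the backend with run_job() and hook amd_sched_process_job() up to the
 * returned hardware fence.
 */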
static int amd_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
	int r, count;

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct amd_sched_entity *entity;
		struct amd_sched_fence *s_fence;
		struct amd_sched_job *sched_job;
		struct fence *fence;

		wait_event_interruptible(sched->wake_up_worker,
			(entity = amd_sched_select_entity(sched)) ||
			kthread_should_stop());

		if (!entity)
			continue;

		sched_job = amd_sched_entity_pop_job(entity);
		if (!sched_job)
			continue;

		s_fence = sched_job->s_fence;

		atomic_inc(&sched->hw_rq_count);
		amd_sched_job_pre_schedule(sched, sched_job);
		fence = sched->ops->run_job(sched_job);
		amd_sched_fence_scheduled(s_fence);
		if (fence) {
			r = fence_add_callback(fence, &s_fence->cb,
					       amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n", r);
			fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}

		count = kfifo_out(&entity->job_queue, &sched_job,
				sizeof(sched_job));
		WARN_ON(count != sizeof(sched_job));
		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * Init a gpu scheduler instance
 *
 * @sched		The pointer to the scheduler
 * @ops			The backend operations for this scheduler.
 * @hw_submission	Number of hw submissions that can be in flight.
 * @timeout		Timeout for a job in jiffies (MAX_SCHEDULE_TIMEOUT to disable).
 * @name		Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int amd_sched_init(struct amd_gpu_scheduler *sched,
		   const struct amd_sched_backend_ops *ops,
		   unsigned hw_submission, long timeout, const char *name)
{
	int i;
	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++)
		amd_sched_rq_init(&sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->ring_mirror_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->hw_rq_count, 0);
	if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
		sched_fence_slab = kmem_cache_create(
			"amd_sched_fence", sizeof(struct amd_sched_fence), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!sched_fence_slab)
			return -ENOMEM;
	}

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return PTR_ERR(sched->thread);
	}

	return 0;
}
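
/*
 * Illustrative setup (a sketch, not part of this file): a driver typically
 * creates one scheduler per ring and tears it down with amd_sched_fini()
 * when the ring is destroyed.  "ring", "my_sched_ops", "num_hw_submission"
 * and "timeout_ms" are assumptions made for the example only.
 *
 *	r = amd_sched_init(&ring->sched, &my_sched_ops, num_hw_submission,
 *			   msecs_to_jiffies(timeout_ms), ring->name);
 *	if (r)
 *		return r;
 */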

/**
 * Destroy a gpu scheduler
 *
 * @sched	The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
	if (sched->thread)
		kthread_stop(sched->thread);
	if (atomic_dec_and_test(&sched_fence_slab_ref))
		kmem_cache_destroy(sched_fence_slab);
}