/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/kthread.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"

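/*
 * The runqueue is an array of lists, one per priority level, with a
 * bitmap for O(1) lookup of the highest occupied priority; a sentinel
 * bit at MAX_PRIO is kept set so sched_find_first_bit() returns
 * MAX_PRIO when the runqueue is empty.  runq_lock protects the
 * runqueue, active_mutex protects the per-node lists of spus that
 * currently run a context.
 */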
struct spu_prio_array {
	DECLARE_BITMAP(bitmap, MAX_PRIO);
	struct list_head runq[MAX_PRIO];
	spinlock_t runq_lock;
	struct list_head active_list[MAX_NUMNODES];
	struct mutex active_mutex[MAX_NUMNODES];
};

static struct spu_prio_array *spu_prio;
static struct task_struct *spusched_task;
static struct timer_list spusched_timer;

/*
 * Priority of a normal, non-rt, non-niced process (aka nice level 0).
 */
#define NORMAL_PRIO		120

/*
 * Frequency of the spu scheduler tick.  By default we do one SPU scheduler
 * tick for every 10 CPU scheduler ticks.
 */
#define SPUSCHED_TICK		(10)

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * Minimum timeslice is 5 msecs (or 1 spu scheduler tick, whichever is
 * larger), default timeslice is 100 msecs, maximum timeslice is 800 msecs.
 */
#define MIN_SPU_TIMESLICE	max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
#define DEF_SPU_TIMESLICE	(100 * HZ / (1000 * SPUSCHED_TICK))

#define MAX_USER_PRIO		(MAX_PRIO - MAX_RT_PRIO)
#define SCALE_PRIO(x, prio) \
	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)

/*
 * scale user-nice values [ -20 ... 0 ... 19 ] to time slice values:
 * [800ms ... 100ms ... 5ms]
 *
 * The higher a thread's priority, the bigger timeslices
 * it gets during one round of execution. But even the lowest
 * priority thread gets MIN_SPU_TIMESLICE worth of execution time.
 */
void spu_set_timeslice(struct spu_context *ctx)
{
	if (ctx->prio < NORMAL_PRIO)
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);
	else
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);
}

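/**
 * node_allowed - check if the current task may run on a given node
 * @node:	node to check
 *
 * Returns true if @node has cpus and its cpumask intersects with the
 * cpu affinity mask of the calling task.
 */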
static inline int node_allowed(int node)
{
	cpumask_t mask;

	if (!nr_cpus_node(node))
		return 0;
	mask = node_to_cpumask(node);
	if (!cpus_intersects(mask, current->cpus_allowed))
		return 0;
	return 1;
}

/**
 * spu_add_to_active_list - add spu to active list
 * @spu:	spu to add to the active list
 */
static void spu_add_to_active_list(struct spu *spu)
{
	mutex_lock(&spu_prio->active_mutex[spu->node]);
	list_add_tail(&spu->list, &spu_prio->active_list[spu->node]);
	mutex_unlock(&spu_prio->active_mutex[spu->node]);
}

static void __spu_remove_from_active_list(struct spu *spu)
{
	list_del_init(&spu->list);
}

/**
 * spu_remove_from_active_list - remove spu from active list
 * @spu:       spu to remove from the active list
 */
static void spu_remove_from_active_list(struct spu *spu)
{
	int node = spu->node;

	mutex_lock(&spu_prio->active_mutex[node]);
	__spu_remove_from_active_list(spu);
	mutex_unlock(&spu_prio->active_mutex[node]);
}

static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);

static void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
{
	blocking_notifier_call_chain(&spu_switch_notifier,
			    ctx ? ctx->object_id : 0, spu);
}

int spu_switch_event_register(struct notifier_block *n)
{
	return blocking_notifier_chain_register(&spu_switch_notifier, n);
}

int spu_switch_event_unregister(struct notifier_block *n)
{
	return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
}

/**
 * spu_bind_context - bind spu context to physical spu
 * @spu:	physical spu to bind to
 * @ctx:	context to bind
 */
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
		 spu->number, spu->node);
	spu->ctx = ctx;
	spu->flags = 0;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu_associate_mm(spu, ctx->owner);
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	spu->dma_callback = spufs_dma_callback;
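	/* make sure the callback assignments are visible before the context runs */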
	mb();
	spu_unmap_mappings(ctx);
	spu_restore(&ctx->csa, spu);
	spu->timestamp = jiffies;
	spu_cpu_affinity_set(spu, raw_smp_processor_id());
	spu_switch_notify(spu, ctx);
	ctx->state = SPU_STATE_RUNNABLE;
}

/**
 * spu_unbind_context - unbind spu context from physical spu
 * @spu:	physical spu to unbind from
 * @ctx:	context to unbind
 */
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
		 spu->pid, spu->number, spu->node);

	spu_switch_notify(spu, NULL);
	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu->dma_callback = NULL;
	spu_associate_mm(spu, NULL);
	spu->pid = 0;
	ctx->ops = &spu_backing_ops;
	ctx->spu = NULL;
	spu->flags = 0;
	spu->ctx = NULL;
}

/**
 * __spu_add_to_rq - add a context to the runqueue
 * @ctx:       context to add
 *
 * Must be called with spu_prio->runq_lock held.
 */
static void __spu_add_to_rq(struct spu_context *ctx)
{
	int prio = ctx->prio;

	list_add_tail(&ctx->rq, &spu_prio->runq[prio]);
	set_bit(prio, spu_prio->bitmap);
}

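/**
 * __spu_del_from_rq - remove a context from the runqueue
 * @ctx:	context to remove
 *
 * Must be called with spu_prio->runq_lock held.  Clears the priority
 * bit when the runqueue for that priority level becomes empty.
 */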
static void __spu_del_from_rq(struct spu_context *ctx)
{
	int prio = ctx->prio;

	if (!list_empty(&ctx->rq))
		list_del_init(&ctx->rq);
	if (list_empty(&spu_prio->runq[prio]))
		clear_bit(prio, spu_prio->bitmap);
}

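/**
 * spu_prio_wait - sleep until a spu is available
 * @ctx:	context to wait on
 *
 * Put @ctx on the runqueue and sleep until another context frees a
 * spu and wakes us through ctx->stop_wq.  Drops ctx->state_mutex
 * around the call to schedule() and reacquires it afterwards.
 */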
static void spu_prio_wait(struct spu_context *ctx)
{
	DEFINE_WAIT(wait);

	spin_lock(&spu_prio->runq_lock);
	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
	if (!signal_pending(current)) {
		__spu_add_to_rq(ctx);
		spin_unlock(&spu_prio->runq_lock);
		mutex_unlock(&ctx->state_mutex);
		schedule();
		mutex_lock(&ctx->state_mutex);
		spin_lock(&spu_prio->runq_lock);
		__spu_del_from_rq(ctx);
	}
	spin_unlock(&spu_prio->runq_lock);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->stop_wq, &wait);
}

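/**
 * spu_get_idle - try to allocate an idle spu
 * @ctx:	spu context to schedule
 *
 * Walk the nodes starting from the local one and return the first
 * idle spu found on a node the calling task may run on, or NULL if
 * all spus are busy.
 */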
static struct spu *spu_get_idle(struct spu_context *ctx)
{
	struct spu *spu = NULL;
	int node = cpu_to_node(raw_smp_processor_id());
	int n;

	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(node))
			continue;
		spu = spu_alloc_node(node);
		if (spu)
			break;
	}
	return spu;
}

/**
 * find_victim - find a lower priority context to preempt
 * @ctx:	candidate context for running
 *
 * Returns the freed physical spu to run the new context on.
 */
static struct spu *find_victim(struct spu_context *ctx)
{
	struct spu_context *victim = NULL;
	struct spu *spu;
	int node, n;

	/*
	 * Look for a possible preemption candidate on the local node first.
	 * If there is no candidate look at the other nodes.  This isn't
 * exactly fair, but so far the whole spu scheduler tries to keep
	 * a strong node affinity.  We might want to fine-tune this in
	 * the future.
	 */
 restart:
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(node))
			continue;

		mutex_lock(&spu_prio->active_mutex[node]);
		list_for_each_entry(spu, &spu_prio->active_list[node], list) {
			struct spu_context *tmp = spu->ctx;

			if (tmp->prio > ctx->prio &&
			    (!victim || tmp->prio > victim->prio))
				victim = spu->ctx;
		}
		mutex_unlock(&spu_prio->active_mutex[node]);

		if (victim) {
			/*
			 * This nests ctx->state_mutex, but we always lock
			 * higher priority contexts before lower priority
			 * ones, so this is safe until we introduce
			 * priority inheritance schemes.
			 */
			if (!mutex_trylock(&victim->state_mutex)) {
				victim = NULL;
				goto restart;
			}

			spu = victim->spu;
			if (!spu) {
				/*
				 * This race can happen because we've dropped
				 * the active list mutex.  Not a problem, just
				 * restart the search.
				 */
				mutex_unlock(&victim->state_mutex);
				victim = NULL;
				goto restart;
			}
			spu_remove_from_active_list(spu);
			spu_unbind_context(spu, victim);
			mutex_unlock(&victim->state_mutex);
			/*
			 * We need to break out of the wait loop in spu_run
			 * manually to ensure this context gets put on the
			 * runqueue again ASAP.
			 */
			wake_up(&victim->stop_wq);
			return spu;
		}
	}

	return NULL;
}

/**
 * spu_activate - find a free spu for a context and execute it
 * @ctx:	spu context to schedule
 * @flags:	flags (currently ignored)
 *
 * Tries to find a free spu to run @ctx.  If no free spu is available,
 * add the context to the runqueue so it gets woken up once an spu
 * is available.
 */
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
	if (ctx->spu)
		return 0;

	do {
		struct spu *spu;

		spu = spu_get_idle(ctx);
		/*
		 * If this is a realtime thread we try to get it running by
		 * preempting a lower priority thread.
		 */
		if (!spu && rt_prio(ctx->prio))
			spu = find_victim(ctx);
		if (spu) {
			spu_bind_context(spu, ctx);
			spu_add_to_active_list(spu);
			return 0;
		}

		spu_prio_wait(ctx);
	} while (!signal_pending(current));

	return -ERESTARTSYS;
}

/**
 * grab_runnable_context - try to find a runnable context
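 * @prio:	only consider contexts with a priority value below this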
 *
 * Remove the highest priority context below @prio from the runqueue
 * and return it to the caller.  Returns %NULL if no runnable context
 * was found.
 */
static struct spu_context *grab_runnable_context(int prio)
{
	struct spu_context *ctx = NULL;
	int best;

	spin_lock(&spu_prio->runq_lock);
	best = sched_find_first_bit(spu_prio->bitmap);
	if (best < prio) {
		struct list_head *rq = &spu_prio->runq[best];

		BUG_ON(list_empty(rq));

		ctx = list_entry(rq->next, struct spu_context, rq);
		__spu_del_from_rq(ctx);
	}
	spin_unlock(&spu_prio->runq_lock);

	return ctx;
}

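/*
 * Unbind @ctx from its spu if there is a context with a priority
 * below @max_prio waiting on the runqueue, or unconditionally if
 * @force is set.  A waiting context, if found, is woken so it can
 * claim the freed spu.  Returns nonzero if such a context was found.
 */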
static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
{
	struct spu *spu = ctx->spu;
	struct spu_context *new = NULL;

	if (spu) {
		new = grab_runnable_context(max_prio);
		if (new || force) {
			spu_remove_from_active_list(spu);
			spu_unbind_context(spu, ctx);
			spu_free(spu);
			if (new)
				wake_up(&new->stop_wq);
		}

	}

	return new != NULL;
}

/**
 * spu_deactivate - unbind a context from its physical spu
 * @ctx:	spu context to unbind
 *
 * Unbind @ctx from the physical spu it is running on and schedule
 * the highest priority context to run on the freed physical spu.
 */
void spu_deactivate(struct spu_context *ctx)
{
	__spu_deactivate(ctx, 1, MAX_PRIO);
}

/**
 * spu_yield -  yield a physical spu if others are waiting
 * @ctx:	spu context to yield
 *
 * Check if there is a higher priority context waiting and if yes
 * unbind @ctx from the physical spu and schedule the highest
 * priority context to run on the freed physical spu instead.
 */
void spu_yield(struct spu_context *ctx)
{
	if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
		mutex_lock(&ctx->state_mutex);
		__spu_deactivate(ctx, 0, MAX_PRIO);
		mutex_unlock(&ctx->state_mutex);
	}
}

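/**
 * spusched_tick - time slice accounting for a running context
 * @ctx:	context that currently owns a spu
 *
 * SCHED_FIFO contexts never expire.  Everyone else is preempted once
 * their time slice runs out, but only if a context of equal or higher
 * priority is waiting on the runqueue.
 */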
static void spusched_tick(struct spu_context *ctx)
{
	if (ctx->policy == SCHED_FIFO || --ctx->time_slice)
		return;

	/*
	 * Unfortunately active_mutex ranks outside of state_mutex, so
	 * we have to trylock here.  If we fail, give the context another
	 * tick and try again.
	 */
	if (mutex_trylock(&ctx->state_mutex)) {
		struct spu_context *new = grab_runnable_context(ctx->prio + 1);
		if (new) {
			struct spu *spu = ctx->spu;

			__spu_remove_from_active_list(spu);
			spu_unbind_context(spu, ctx);
			spu_free(spu);
			wake_up(&new->stop_wq);
			/*
			 * We need to break out of the wait loop in
			 * spu_run manually to ensure this context
			 * gets put on the runqueue again ASAP.
			 */
			wake_up(&ctx->stop_wq);
		}
		spu_set_timeslice(ctx);
		mutex_unlock(&ctx->state_mutex);
	} else {
		ctx->time_slice++;
	}
}

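/*
 * The scheduler thread is woken by a self-rearming timer every
 * SPUSCHED_TICK jiffies and charges a tick to each context that is
 * currently running on a spu.
 */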
static void spusched_wake(unsigned long data)
{
	mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	wake_up_process(spusched_task);
}

static int spusched_thread(void *unused)
{
	struct spu *spu, *next;
	int node;

	setup_timer(&spusched_timer, spusched_wake, 0);
	__mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		for (node = 0; node < MAX_NUMNODES; node++) {
			mutex_lock(&spu_prio->active_mutex[node]);
			list_for_each_entry_safe(spu, next,
						 &spu_prio->active_list[node],
						 list)
				spusched_tick(spu->ctx);
			mutex_unlock(&spu_prio->active_mutex[node]);
		}
	}

	del_timer_sync(&spusched_timer);
	return 0;
}

int __init spu_sched_init(void)
{
	int i;

	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
	if (!spu_prio)
		return -ENOMEM;

	for (i = 0; i < MAX_PRIO; i++) {
		INIT_LIST_HEAD(&spu_prio->runq[i]);
		__clear_bit(i, spu_prio->bitmap);
	}
	__set_bit(MAX_PRIO, spu_prio->bitmap);
	for (i = 0; i < MAX_NUMNODES; i++) {
		mutex_init(&spu_prio->active_mutex[i]);
		INIT_LIST_HEAD(&spu_prio->active_list[i]);
	}
	spin_lock_init(&spu_prio->runq_lock);

	spusched_task = kthread_run(spusched_thread, NULL, "spusched");
	if (IS_ERR(spusched_task)) {
		kfree(spu_prio);
		return PTR_ERR(spusched_task);
	}
	return 0;
}

void __exit spu_sched_exit(void)
{
	struct spu *spu, *tmp;
	int node;

	kthread_stop(spusched_task);

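	/* release any spus that are still on an active list */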
	for (node = 0; node < MAX_NUMNODES; node++) {
		mutex_lock(&spu_prio->active_mutex[node]);
		list_for_each_entry_safe(spu, tmp, &spu_prio->active_list[node],
					 list) {
			list_del_init(&spu->list);
			spu_free(spu);
		}
		mutex_unlock(&spu_prio->active_mutex[node]);
	}
	kfree(spu_prio);
}