/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/pid_namespace.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"

struct spu_prio_array {
	DECLARE_BITMAP(bitmap, MAX_PRIO);
	struct list_head runq[MAX_PRIO];
	spinlock_t runq_lock;
	struct list_head active_list[MAX_NUMNODES];
	struct mutex active_mutex[MAX_NUMNODES];
	int nr_active[MAX_NUMNODES];
	int nr_waiting;
};

static unsigned long spu_avenrun[3];
static struct spu_prio_array *spu_prio;
static struct task_struct *spusched_task;
static struct timer_list spusched_timer;

/*
 * Priority of a normal, non-rt, non-niced process (aka nice level 0).
 */
#define NORMAL_PRIO		120

/*
 * Frequency of the spu scheduler tick.  By default we do one SPU scheduler
 * tick for every 10 CPU scheduler ticks.
 */
#define SPUSCHED_TICK		(10)

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * Minimum timeslice is 5 msecs (or 1 spu scheduler tick, whichever is
 * larger), default timeslice is 100 msecs, maximum timeslice is 800 msecs.
 */
#define MIN_SPU_TIMESLICE	max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
#define DEF_SPU_TIMESLICE	(100 * HZ / (1000 * SPUSCHED_TICK))

#define MAX_USER_PRIO		(MAX_PRIO - MAX_RT_PRIO)
#define SCALE_PRIO(x, prio) \
	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)

/*
 * scale user-nice values [ -20 ... 0 ... 19 ] to time slice values:
 * [800ms ... 100ms ... 5ms]
 *
 * The higher a thread's priority, the bigger timeslices
 * it gets during one round of execution. But even the lowest
 * priority thread gets MIN_SPU_TIMESLICE worth of execution time.
 */
void spu_set_timeslice(struct spu_context *ctx)
{
	if (ctx->prio < NORMAL_PRIO)
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);
	else
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);
}
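
/*
 * Worked example of the scaling above (a sketch, assuming the usual
 * mainline values MAX_PRIO == 140 and MAX_RT_PRIO == 100, which give
 * MAX_USER_PRIO == 40; DEF stands for DEF_SPU_TIMESLICE):
 *
 *	nice   0 -> prio 120: SCALE_PRIO(DEF, 120)   = DEF * 20 / 20  ~ 100ms
 *	nice -20 -> prio 100: SCALE_PRIO(DEF*4, 100) = 4*DEF * 40 / 20 ~ 800ms
 *	nice  19 -> prio 139: SCALE_PRIO(DEF, 139)   = DEF * 1 / 20   ~ 5ms
 *
 * which matches the [800ms ... 100ms ... 5ms] range documented above.
 */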

/*
 * Update scheduling information from the owning thread.
 */
void __spu_update_sched_info(struct spu_context *ctx)
{
	/*
	 * 32-bit assignments are atomic on powerpc, and we don't care about
	 * memory ordering here because retrieving the controlling thread is
	 * by definition racy.
	 */
	ctx->tid = current->pid;

	/*
	 * We do our own priority calculations, so we normally want
	 * ->static_prio to start with. Unfortunately this field
	 * contains junk for threads with a realtime scheduling
	 * policy so we have to look at ->prio in this case.
	 */
	if (rt_prio(current->prio))
		ctx->prio = current->prio;
	else
		ctx->prio = current->static_prio;
	ctx->policy = current->policy;

	/*
	 * A lot of places that don't hold active_mutex poke into
	 * cpus_allowed, including grab_runnable_context which
	 * already holds the runq_lock.  So abuse runq_lock
	 * to protect this field as well.
	 */
	spin_lock(&spu_prio->runq_lock);
	ctx->cpus_allowed = current->cpus_allowed;
	spin_unlock(&spu_prio->runq_lock);
}

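/**
 * spu_update_sched_info - update scheduling information for a running context
 * @ctx:	context to update
 *
 * Same as __spu_update_sched_info(), but takes the active_mutex of the
 * node @ctx is currently running on, so it may be called while the
 * context is bound to an spu.
 */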
void spu_update_sched_info(struct spu_context *ctx)
{
	int node = ctx->spu->node;

	mutex_lock(&spu_prio->active_mutex[node]);
	__spu_update_sched_info(ctx);
	mutex_unlock(&spu_prio->active_mutex[node]);
}

static int __node_allowed(struct spu_context *ctx, int node)
{
	if (nr_cpus_node(node)) {
		cpumask_t mask = node_to_cpumask(node);

		if (cpus_intersects(mask, ctx->cpus_allowed))
			return 1;
	}

	return 0;
}

static int node_allowed(struct spu_context *ctx, int node)
{
	int rval;

	spin_lock(&spu_prio->runq_lock);
	rval = __node_allowed(ctx, node);
	spin_unlock(&spu_prio->runq_lock);

	return rval;
}

/**
 * spu_add_to_active_list - add spu to active list
 * @spu:	spu to add to the active list
 */
static void spu_add_to_active_list(struct spu *spu)
{
	int node = spu->node;

	mutex_lock(&spu_prio->active_mutex[node]);
	spu_prio->nr_active[node]++;
	list_add_tail(&spu->list, &spu_prio->active_list[node]);
	mutex_unlock(&spu_prio->active_mutex[node]);
}

static void __spu_remove_from_active_list(struct spu *spu)
{
	list_del_init(&spu->list);
	spu_prio->nr_active[spu->node]--;
}

/**
 * spu_remove_from_active_list - remove spu from active list
 * @spu:       spu to remove from the active list
 */
static void spu_remove_from_active_list(struct spu *spu)
{
	int node = spu->node;

	mutex_lock(&spu_prio->active_mutex[node]);
	__spu_remove_from_active_list(spu);
	mutex_unlock(&spu_prio->active_mutex[node]);
}

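/*
 * Notifier chain for spu context switch events.  Listeners registered
 * through spu_switch_event_register() are called with the object_id of
 * the incoming context (or 0 when the spu is being unbound) and a
 * pointer to the spu itself.
 */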
static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);

static void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
{
	blocking_notifier_call_chain(&spu_switch_notifier,
			    ctx ? ctx->object_id : 0, spu);
}

int spu_switch_event_register(struct notifier_block * n)
{
	return blocking_notifier_chain_register(&spu_switch_notifier, n);
}

int spu_switch_event_unregister(struct notifier_block * n)
{
	return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
}

/**
 * spu_bind_context - bind spu context to physical spu
 * @spu:	physical spu to bind to
 * @ctx:	context to bind
 */
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
		 spu->number, spu->node);

	ctx->stats.slb_flt_base = spu->stats.slb_flt;
	ctx->stats.class2_intr_base = spu->stats.class2_intr;

	spu->ctx = ctx;
	spu->flags = 0;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu_associate_mm(spu, ctx->owner);
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	spu->dma_callback = spufs_dma_callback;
	mb();
	spu_unmap_mappings(ctx);
	spu_restore(&ctx->csa, spu);
	spu->timestamp = jiffies;
	spu_cpu_affinity_set(spu, raw_smp_processor_id());
	spu_switch_notify(spu, ctx);
	ctx->state = SPU_STATE_RUNNABLE;
}

/**
 * spu_unbind_context - unbind spu context from physical spu
 * @spu:	physical spu to unbind from
 * @ctx:	context to unbind
 */
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
		 spu->pid, spu->number, spu->node);

	spu_switch_notify(spu, NULL);
	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu->dma_callback = NULL;
	spu_associate_mm(spu, NULL);
	spu->pid = 0;
	ctx->ops = &spu_backing_ops;
	ctx->spu = NULL;
	spu->flags = 0;
	spu->ctx = NULL;

	ctx->stats.slb_flt +=
		(spu->stats.slb_flt - ctx->stats.slb_flt_base);
	ctx->stats.class2_intr +=
		(spu->stats.class2_intr - ctx->stats.class2_intr_base);
}

/**
 * __spu_add_to_rq - add a context to the runqueue
 * @ctx:       context to add
 */
static void __spu_add_to_rq(struct spu_context *ctx)
{
	int prio = ctx->prio;

	spu_prio->nr_waiting++;
	list_add_tail(&ctx->rq, &spu_prio->runq[prio]);
	set_bit(prio, spu_prio->bitmap);
}

static void __spu_del_from_rq(struct spu_context *ctx)
{
	int prio = ctx->prio;

	if (!list_empty(&ctx->rq)) {
		list_del_init(&ctx->rq);
		spu_prio->nr_waiting--;
	}
	if (list_empty(&spu_prio->runq[prio]))
		clear_bit(prio, spu_prio->bitmap);
}

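/*
 * spu_prio_wait - put @ctx on the runqueue and sleep until it is woken
 *
 * Called with ctx->state_mutex held.  The mutex is dropped around
 * schedule() so the context can be bound from the wakeup path and is
 * re-taken before the context takes itself off the runqueue again.
 * The sleep is interruptible; a pending signal aborts the wait.
 */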
static void spu_prio_wait(struct spu_context *ctx)
{
	DEFINE_WAIT(wait);

	spin_lock(&spu_prio->runq_lock);
	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
	if (!signal_pending(current)) {
		__spu_add_to_rq(ctx);
		spin_unlock(&spu_prio->runq_lock);
		mutex_unlock(&ctx->state_mutex);
		schedule();
		mutex_lock(&ctx->state_mutex);
		spin_lock(&spu_prio->runq_lock);
		__spu_del_from_rq(ctx);
	}
	spin_unlock(&spu_prio->runq_lock);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->stop_wq, &wait);
}

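/*
 * spu_get_idle - try to allocate an idle spu for @ctx
 *
 * Walk the nodes starting with the one the current cpu belongs to, skip
 * nodes the context is not allowed to run on, and return the first idle
 * spu found, or NULL if every allowed node is fully busy.
 */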
static struct spu *spu_get_idle(struct spu_context *ctx)
{
	struct spu *spu = NULL;
	int node = cpu_to_node(raw_smp_processor_id());
	int n;

	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;
		spu = spu_alloc_node(node);
		if (spu)
			break;
	}
	return spu;
}

/**
 * find_victim - find a lower priority context to preempt
 * @ctx:	candidate context for running
 *
 * Returns the freed physical spu to run the new context on.
 */
static struct spu *find_victim(struct spu_context *ctx)
{
	struct spu_context *victim = NULL;
	struct spu *spu;
	int node, n;

	/*
	 * Look for a possible preemption candidate on the local node first.
	 * If there is no candidate look at the other nodes.  This isn't
	 * exactly fair, but so far the whole spu scheduler tries to keep
	 * a strong node affinity.  We might want to fine-tune this in
	 * the future.
	 */
 restart:
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&spu_prio->active_mutex[node]);
		list_for_each_entry(spu, &spu_prio->active_list[node], list) {
			struct spu_context *tmp = spu->ctx;

			if (tmp->prio > ctx->prio &&
			    (!victim || tmp->prio > victim->prio))
				victim = spu->ctx;
		}
		mutex_unlock(&spu_prio->active_mutex[node]);

		if (victim) {
			/*
			 * This nests ctx->state_mutex, but we always lock
			 * higher priority contexts before lower priority
			 * ones, so this is safe until we introduce
			 * priority inheritance schemes.
			 */
			if (!mutex_trylock(&victim->state_mutex)) {
				victim = NULL;
				goto restart;
			}

			spu = victim->spu;
			if (!spu) {
				/*
				 * This race can happen because we've dropped
				 * the active list mutex.  Not a problem, just
				 * restart the search.
				 */
				mutex_unlock(&victim->state_mutex);
				victim = NULL;
				goto restart;
			}
			spu_remove_from_active_list(spu);
			spu_unbind_context(spu, victim);
			victim->stats.invol_ctx_switch++;
			mutex_unlock(&victim->state_mutex);
			/*
			 * We need to break out of the wait loop in spu_run
			 * manually to ensure this context gets put on the
			 * runqueue again ASAP.
			 */
			wake_up(&victim->stop_wq);
			return spu;
		}
	}

	return NULL;
}

/**
 * spu_activate - find a free spu for a context and execute it
 * @ctx:	spu context to schedule
 * @flags:	flags (currently ignored)
 *
 * Tries to find a free spu to run @ctx.  If no free spu is available
 * add the context to the runqueue so it gets woken up once an spu
 * is available.
 */
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
	spuctx_switch_state(ctx, SPUCTX_UTIL_SYSTEM);

	if (ctx->spu)
		return 0;

	do {
		struct spu *spu;

		spu = spu_get_idle(ctx);
		/*
		 * If this is a realtime thread we try to get it running by
		 * preempting a lower priority thread.
		 */
		if (!spu && rt_prio(ctx->prio))
			spu = find_victim(ctx);
		if (spu) {
			spu_bind_context(spu, ctx);
			spu_add_to_active_list(spu);
			return 0;
		}

		spu_prio_wait(ctx);
	} while (!signal_pending(current));

	return -ERESTARTSYS;
}

/**
 * grab_runnable_context - try to find a runnable context
 *
 * Remove the highest priority context on the runqueue and return it
 * to the caller.  Returns %NULL if no runnable context was found.
 */
static struct spu_context *grab_runnable_context(int prio, int node)
{
	struct spu_context *ctx;
	int best;

	spin_lock(&spu_prio->runq_lock);
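	/*
	 * The extra bit set at MAX_PRIO in spu_sched_init() acts as a
	 * sentinel for sched_find_first_bit(): with an empty runqueue the
	 * scan returns MAX_PRIO and the while loop below falls through.
	 */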
	best = sched_find_first_bit(spu_prio->bitmap);
	while (best < prio) {
		struct list_head *rq = &spu_prio->runq[best];

		list_for_each_entry(ctx, rq, rq) {
			/* XXX(hch): check for affinity here as well */
			if (__node_allowed(ctx, node)) {
				__spu_del_from_rq(ctx);
				goto found;
			}
		}
		best++;
	}
	ctx = NULL;
 found:
	spin_unlock(&spu_prio->runq_lock);
	return ctx;
}

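/*
 * __spu_deactivate - unbind @ctx and hand its spu to a runnable context
 *
 * If @ctx is currently bound, look on the runqueue for a context with a
 * priority better than @max_prio.  If one is found (or @force is set),
 * unbind @ctx, free the spu and wake the new context so it can claim it.
 * Returns non-zero if another context was woken.
 */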
static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
{
	struct spu *spu = ctx->spu;
	struct spu_context *new = NULL;

	if (spu) {
		new = grab_runnable_context(max_prio, spu->node);
		if (new || force) {
			spu_remove_from_active_list(spu);
			spu_unbind_context(spu, ctx);
			ctx->stats.vol_ctx_switch++;
			spu_free(spu);
			if (new)
				wake_up(&new->stop_wq);
		}

	}

	return new != NULL;
}

/**
 * spu_deactivate - unbind a context from its physical spu
 * @ctx:	spu context to unbind
 *
 * Unbind @ctx from the physical spu it is running on and schedule
 * the highest priority context to run on the freed physical spu.
 */
void spu_deactivate(struct spu_context *ctx)
{
	/*
	 * We must never reach this for a nosched context,
	 * but handle the case gracefully instead of panicking.
	 */
	if (ctx->flags & SPU_CREATE_NOSCHED) {
		WARN_ON(1);
		return;
	}

	__spu_deactivate(ctx, 1, MAX_PRIO);
	spuctx_switch_state(ctx, SPUCTX_UTIL_USER);
}

/**
 * spu_yield - yield a physical spu if others are waiting
 * @ctx:	spu context to yield
 *
 * Check if there is a higher priority context waiting and if so,
 * unbind @ctx from the physical spu and schedule the highest
 * priority context to run on the freed physical spu instead.
 */
void spu_yield(struct spu_context *ctx)
{
	if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
		mutex_lock(&ctx->state_mutex);
		if (__spu_deactivate(ctx, 0, MAX_PRIO))
			spuctx_switch_state(ctx, SPUCTX_UTIL_USER);
		else
			spuctx_switch_state(ctx, SPUCTX_UTIL_LOADED);
		mutex_unlock(&ctx->state_mutex);
	}
}

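/*
 * spusched_tick - timeslice accounting for one running context
 *
 * Called from spusched_thread() for every context on an active list.
 * Nosched and SCHED_FIFO contexts never expire.  Once the timeslice has
 * run out we look for a waiting context of equal or better priority and,
 * if there is one, preempt the current context in its favour.
 */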
static void spusched_tick(struct spu_context *ctx)
{
	if (ctx->flags & SPU_CREATE_NOSCHED)
		return;
	if (ctx->policy == SCHED_FIFO)
		return;

	if (--ctx->time_slice)
		return;

	/*
	 * Unfortunately active_mutex ranks outside of state_mutex, so
	 * we have to trylock here.  If we fail, give the context another
	 * tick and try again.
	 */
	if (mutex_trylock(&ctx->state_mutex)) {
		struct spu *spu = ctx->spu;
		struct spu_context *new;

		new = grab_runnable_context(ctx->prio + 1, spu->node);
		if (new) {

			__spu_remove_from_active_list(spu);
			spu_unbind_context(spu, ctx);
			ctx->stats.invol_ctx_switch++;
			spu_free(spu);
			wake_up(&new->stop_wq);
			/*
			 * We need to break out of the wait loop in
			 * spu_run manually to ensure this context
			 * gets put on the runqueue again ASAP.
			 */
			wake_up(&ctx->stop_wq);
		}
		spu_set_timeslice(ctx);
		mutex_unlock(&ctx->state_mutex);
	} else {
		ctx->time_slice++;
	}
}

/**
 * count_active_contexts - count nr of active tasks
 *
 * Return the number of tasks currently running or waiting to run.
 *
 * Note that we don't take runq_lock / active_mutex here.  Reading
 * a single 32bit value is atomic on powerpc, and we don't care
 * about memory ordering issues here.
 */
static unsigned long count_active_contexts(void)
{
	int nr_active = 0, node;

	for (node = 0; node < MAX_NUMNODES; node++)
		nr_active += spu_prio->nr_active[node];
	nr_active += spu_prio->nr_waiting;

	return nr_active;
}

/**
 * spu_calc_load - given tick count, update the avenrun load estimates.
 * @ticks:	tick count
 *
 * No locking against reading these values from userspace, as for
 * the CPU loadavg code.
 */
static void spu_calc_load(unsigned long ticks)
{
	unsigned long active_tasks; /* fixed-point */
	static int count = LOAD_FREQ;

	count -= ticks;

	if (unlikely(count < 0)) {
		active_tasks = count_active_contexts() * FIXED_1;
		do {
			CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
			CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
			CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
			count += LOAD_FREQ;
		} while (count < 0);
	}
}

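/*
 * The scheduler tick is driven by a self-rearming timer: spusched_wake()
 * re-arms itself every SPUSCHED_TICK jiffies, feeds spu_calc_load() and
 * wakes the spusched kernel thread, which then runs spusched_tick() for
 * every context currently bound to an spu.
 */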
static void spusched_wake(unsigned long data)
{
	mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	wake_up_process(spusched_task);
	spu_calc_load(SPUSCHED_TICK);
}

static int spusched_thread(void *unused)
{
	struct spu *spu, *next;
	int node;

	setup_timer(&spusched_timer, spusched_wake, 0);
	__mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		for (node = 0; node < MAX_NUMNODES; node++) {
			mutex_lock(&spu_prio->active_mutex[node]);
			list_for_each_entry_safe(spu, next,
						 &spu_prio->active_list[node],
						 list)
				spusched_tick(spu->ctx);
			mutex_unlock(&spu_prio->active_mutex[node]);
		}
	}

	del_timer_sync(&spusched_timer);
	return 0;
}

#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
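
/*
 * spu_avenrun[] uses the same fixed-point format as the CPU avenrun[]
 * (a sketch, assuming the usual FSHIFT == 11 from <linux/sched.h>, i.e.
 * FIXED_1 == 2048): a stored value of 3072 prints as LOAD_INT() == 1
 * and LOAD_FRAC() == 50, i.e. "1.50".
 */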

static int show_spu_loadavg(struct seq_file *s, void *private)
{
	int a, b, c;

	a = spu_avenrun[0] + (FIXED_1/200);
	b = spu_avenrun[1] + (FIXED_1/200);
	c = spu_avenrun[2] + (FIXED_1/200);

	/*
	 * Note that last_pid doesn't really make much sense for the
	 * SPU loadavg (it even seems very odd on the CPU side..),
	 * but we include it here to have a 100% compatible interface.
	 */
	seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
		LOAD_INT(a), LOAD_FRAC(a),
		LOAD_INT(b), LOAD_FRAC(b),
		LOAD_INT(c), LOAD_FRAC(c),
		count_active_contexts(),
		atomic_read(&nr_spu_contexts),
		current->nsproxy->pid_ns->last_pid);
	return 0;
}

static int spu_loadavg_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_spu_loadavg, NULL);
}

static const struct file_operations spu_loadavg_fops = {
	.open		= spu_loadavg_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

int __init spu_sched_init(void)
{
	struct proc_dir_entry *entry;
	int err = -ENOMEM, i;

	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
	if (!spu_prio)
		goto out;

	for (i = 0; i < MAX_PRIO; i++) {
		INIT_LIST_HEAD(&spu_prio->runq[i]);
		__clear_bit(i, spu_prio->bitmap);
	}
	__set_bit(MAX_PRIO, spu_prio->bitmap);
	for (i = 0; i < MAX_NUMNODES; i++) {
		mutex_init(&spu_prio->active_mutex[i]);
		INIT_LIST_HEAD(&spu_prio->active_list[i]);
	}
	spin_lock_init(&spu_prio->runq_lock);

	spusched_task = kthread_run(spusched_thread, NULL, "spusched");
	if (IS_ERR(spusched_task)) {
		err = PTR_ERR(spusched_task);
		goto out_free_spu_prio;
	}

	entry = create_proc_entry("spu_loadavg", 0, NULL);
	if (!entry)
		goto out_stop_kthread;
	entry->proc_fops = &spu_loadavg_fops;

	pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
			SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
	return 0;

 out_stop_kthread:
	kthread_stop(spusched_task);
 out_free_spu_prio:
	kfree(spu_prio);
 out:
	return err;
}

void __exit spu_sched_exit(void)
{
	struct spu *spu, *tmp;
	int node;

	remove_proc_entry("spu_loadavg", NULL);

	kthread_stop(spusched_task);

	for (node = 0; node < MAX_NUMNODES; node++) {
		mutex_lock(&spu_prio->active_mutex[node]);
		list_for_each_entry_safe(spu, tmp, &spu_prio->active_list[node],
					 list) {
			list_del_init(&spu->list);
			spu_free(spu);
		}
		mutex_unlock(&spu_prio->active_mutex[node]);
	}
	kfree(spu_prio);
}