/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/pid_namespace.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"

struct spu_prio_array {
	DECLARE_BITMAP(bitmap, MAX_PRIO);
	struct list_head runq[MAX_PRIO];
	spinlock_t runq_lock;
	struct list_head active_list[MAX_NUMNODES];
	struct mutex active_mutex[MAX_NUMNODES];
	int nr_active[MAX_NUMNODES];
	int nr_waiting;
};

static unsigned long spu_avenrun[3];
static struct spu_prio_array *spu_prio;
static struct task_struct *spusched_task;
static struct timer_list spusched_timer;

/*
 * Priority of a normal, non-rt, non-niced process (aka nice level 0).
 */
#define NORMAL_PRIO		120

/*
 * Frequency of the spu scheduler tick.  By default we do one SPU scheduler
 * tick for every 10 CPU scheduler ticks.
 */
#define SPUSCHED_TICK		(10)

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * Minimum timeslice is 5 msecs (or 1 spu scheduler tick, whichever is
 * larger), default timeslice is 100 msecs, maximum timeslice is 800 msecs.
 */
#define MIN_SPU_TIMESLICE	max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
#define DEF_SPU_TIMESLICE	(100 * HZ / (1000 * SPUSCHED_TICK))
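/*
 * Worked example (illustrative only, assuming HZ == 1000; the real values
 * depend on the kernel's HZ configuration): one spu scheduler tick spans
 * 10 CPU ticks, i.e. about 10 msecs, so DEF_SPU_TIMESLICE evaluates to
 * 10 ticks (~100 msecs) and MIN_SPU_TIMESLICE to max(0, 1) == 1 tick,
 * the "1 spu scheduler tick" floor mentioned above.
 */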

#define MAX_USER_PRIO		(MAX_PRIO - MAX_RT_PRIO)
#define SCALE_PRIO(x, prio) \
	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)

/*
 * scale user-nice values [ -20 ... 0 ... 19 ] to time slice values:
 * [800ms ... 100ms ... 5ms]
 *
 * The higher a thread's priority, the bigger timeslices
 * it gets during one round of execution. But even the lowest
 * priority thread gets MIN_SPU_TIMESLICE worth of execution time.
 */
void spu_set_timeslice(struct spu_context *ctx)
{
	if (ctx->prio < NORMAL_PRIO)
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);
	else
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);
}
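
/*
 * Worked example of the scaling above (using the generic MAX_PRIO == 140
 * and MAX_RT_PRIO == 100, so MAX_USER_PRIO / 2 == 20): nice -20 maps to
 * prio 100, which is below NORMAL_PRIO, so the slice becomes
 * SCALE_PRIO(DEF_SPU_TIMESLICE * 4, 100) = DEF * 4 * 40 / 20 = 8 * DEF
 * (~800 msecs).  Nice 0 (prio 120) gives DEF * 20 / 20 = DEF (~100 msecs),
 * and nice 19 (prio 139) gives max(DEF / 20, MIN_SPU_TIMESLICE), i.e. the
 * ~5 msec floor from the comment above.
 */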

/*
 * Update scheduling information from the owning thread.
 */
void __spu_update_sched_info(struct spu_context *ctx)
{
	/*
	 * 32-bit assignments are atomic on powerpc, and we don't care about
	 * memory ordering here because retrieving the controlling thread is
	 * by definition racy.
	 */
	ctx->tid = current->pid;

	/*
	 * We do our own priority calculations, so we normally want
	 * ->static_prio to start with.  Unfortunately this field
	 * contains junk for threads with a realtime scheduling
	 * policy so we have to look at ->prio in this case.
	 */
	if (rt_prio(current->prio))
		ctx->prio = current->prio;
	else
		ctx->prio = current->static_prio;
	ctx->policy = current->policy;

	/*
	 * A lot of places that don't hold active_mutex poke into
	 * cpus_allowed, including grab_runnable_context which
	 * already holds the runq_lock.  So abuse runq_lock
	 * to protect this field as well.
	 */
	spin_lock(&spu_prio->runq_lock);
	ctx->cpus_allowed = current->cpus_allowed;
	spin_unlock(&spu_prio->runq_lock);
}

void spu_update_sched_info(struct spu_context *ctx)
{
	int node = ctx->spu->node;

	mutex_lock(&spu_prio->active_mutex[node]);
	__spu_update_sched_info(ctx);
	mutex_unlock(&spu_prio->active_mutex[node]);
}

static int __node_allowed(struct spu_context *ctx, int node)
{
	if (nr_cpus_node(node)) {
		cpumask_t mask = node_to_cpumask(node);

		if (cpus_intersects(mask, ctx->cpus_allowed))
			return 1;
	}

	return 0;
}

static int node_allowed(struct spu_context *ctx, int node)
{
	int rval;

	spin_lock(&spu_prio->runq_lock);
	rval = __node_allowed(ctx, node);
	spin_unlock(&spu_prio->runq_lock);

	return rval;
}

/**
 * spu_add_to_active_list - add spu to active list
 * @spu:	spu to add to the active list
 */
static void spu_add_to_active_list(struct spu *spu)
{
	int node = spu->node;

	mutex_lock(&spu_prio->active_mutex[node]);
	spu_prio->nr_active[node]++;
	list_add_tail(&spu->list, &spu_prio->active_list[node]);
	mutex_unlock(&spu_prio->active_mutex[node]);
}

static void __spu_remove_from_active_list(struct spu *spu)
{
	list_del_init(&spu->list);
	spu_prio->nr_active[spu->node]--;
}

/**
 * spu_remove_from_active_list - remove spu from active list
 * @spu:       spu to remove from the active list
 */
static void spu_remove_from_active_list(struct spu *spu)
{
	int node = spu->node;

	mutex_lock(&spu_prio->active_mutex[node]);
	__spu_remove_from_active_list(spu);
	mutex_unlock(&spu_prio->active_mutex[node]);
}

static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);

static void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
{
	blocking_notifier_call_chain(&spu_switch_notifier,
			    ctx ? ctx->object_id : 0, spu);
}

int spu_switch_event_register(struct notifier_block * n)
{
	return blocking_notifier_chain_register(&spu_switch_notifier, n);
}

int spu_switch_event_unregister(struct notifier_block * n)
{
	return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
}
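
/*
 * Hypothetical usage sketch (names invented for illustration, not part of
 * this file): a subscriber such as a profiler registers a standard blocking
 * notifier callback; on every switch it receives ctx->object_id (0 on
 * unbind) as the value argument and the struct spu pointer as the data
 * argument.
 *
 *	static int my_spu_switch(struct notifier_block *nb,
 *				 unsigned long object_id, void *data)
 *	{
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_spu_switch_nb = {
 *		.notifier_call	= my_spu_switch,
 *	};
 *
 *	spu_switch_event_register(&my_spu_switch_nb);
 */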

/**
 * spu_bind_context - bind spu context to physical spu
 * @spu:	physical spu to bind to
 * @ctx:	context to bind
 */
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
		 spu->number, spu->node);
	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	ctx->stats.slb_flt_base = spu->stats.slb_flt;
	ctx->stats.class2_intr_base = spu->stats.class2_intr;

	spu->ctx = ctx;
	spu->flags = 0;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu_associate_mm(spu, ctx->owner);
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	spu->dma_callback = spufs_dma_callback;
	mb();
	spu_unmap_mappings(ctx);
	spu_restore(&ctx->csa, spu);
	spu->timestamp = jiffies;
	spu_cpu_affinity_set(spu, raw_smp_processor_id());
	spu_switch_notify(spu, ctx);
	ctx->state = SPU_STATE_RUNNABLE;

	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
}

/**
 * spu_unbind_context - unbind spu context from physical spu
 * @spu:	physical spu to unbind from
 * @ctx:	context to unbind
 */
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
		 spu->pid, spu->number, spu->node);
	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	spu_switch_notify(spu, NULL);
	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu->dma_callback = NULL;
	spu_associate_mm(spu, NULL);
	spu->pid = 0;
	ctx->ops = &spu_backing_ops;
	spu->flags = 0;
	spu->ctx = NULL;

	ctx->stats.slb_flt +=
		(spu->stats.slb_flt - ctx->stats.slb_flt_base);
	ctx->stats.class2_intr +=
		(spu->stats.class2_intr - ctx->stats.class2_intr_base);

	/* This maps the underlying spu state to idle */
	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
	ctx->spu = NULL;
}

/**
 * spu_add_to_rq - add a context to the runqueue
 * @ctx:       context to add
 */
static void __spu_add_to_rq(struct spu_context *ctx)
{
	/*
	 * Unfortunately this code path can be called from multiple threads
	 * on behalf of a single context due to the way the problem state
	 * mmap support works.
	 *
	 * Fortunately we need to wake up all these threads at the same time
	 * and can simply skip the runqueue addition for all but the first
	 * thread getting into this codepath.
	 *
	 * It's still quite hacky, and long-term we should proxy all other
	 * threads through the owner thread so that spu_run is in control
	 * of all the scheduling activity for a given context.
	 */
	if (list_empty(&ctx->rq)) {
		list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
		set_bit(ctx->prio, spu_prio->bitmap);
		if (!spu_prio->nr_waiting++)
			__mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	}
}

static void __spu_del_from_rq(struct spu_context *ctx)
{
	int prio = ctx->prio;

	if (!list_empty(&ctx->rq)) {
		if (!--spu_prio->nr_waiting)
			del_timer(&spusched_timer);
		list_del_init(&ctx->rq);

		if (list_empty(&spu_prio->runq[prio]))
			clear_bit(prio, spu_prio->bitmap);
	}
}

static void spu_prio_wait(struct spu_context *ctx)
{
	DEFINE_WAIT(wait);

	spin_lock(&spu_prio->runq_lock);
	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
	if (!signal_pending(current)) {
		__spu_add_to_rq(ctx);
		spin_unlock(&spu_prio->runq_lock);
		mutex_unlock(&ctx->state_mutex);
		schedule();
		mutex_lock(&ctx->state_mutex);
		spin_lock(&spu_prio->runq_lock);
		__spu_del_from_rq(ctx);
	}
	spin_unlock(&spu_prio->runq_lock);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->stop_wq, &wait);
}

static struct spu *spu_get_idle(struct spu_context *ctx)
{
	struct spu *spu = NULL;
	int node = cpu_to_node(raw_smp_processor_id());
	int n;

	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;
		spu = spu_alloc_node(node);
		if (spu)
			break;
	}
	return spu;
}

/**
 * find_victim - find a lower priority context to preempt
 * @ctx:	candidate context for running
 *
 * Returns the freed physical spu to run the new context on.
 */
static struct spu *find_victim(struct spu_context *ctx)
{
	struct spu_context *victim = NULL;
	struct spu *spu;
	int node, n;

	/*
	 * Look for a possible preemption candidate on the local node first.
	 * If there is no candidate look at the other nodes.  This isn't
	 * exactly fair, but so far the whole spu scheduler tries to keep
	 * a strong node affinity.  We might want to fine-tune this in
	 * the future.
	 */
 restart:
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&spu_prio->active_mutex[node]);
		list_for_each_entry(spu, &spu_prio->active_list[node], list) {
			struct spu_context *tmp = spu->ctx;

			if (tmp->prio > ctx->prio &&
			    (!victim || tmp->prio > victim->prio))
				victim = spu->ctx;
		}
		mutex_unlock(&spu_prio->active_mutex[node]);

		if (victim) {
			/*
			 * This nests ctx->state_mutex, but we always lock
			 * higher priority contexts before lower priority
			 * ones, so this is safe until we introduce
			 * priority inheritance schemes.
			 */
			if (!mutex_trylock(&victim->state_mutex)) {
				victim = NULL;
				goto restart;
			}

			spu = victim->spu;
			if (!spu) {
				/*
				 * This race can happen because we've dropped
				 * the active list mutex.  Not a problem, just
				 * restart the search.
				 */
				mutex_unlock(&victim->state_mutex);
				victim = NULL;
				goto restart;
			}
			spu_remove_from_active_list(spu);
			spu_unbind_context(spu, victim);
			victim->stats.invol_ctx_switch++;
			spu->stats.invol_ctx_switch++;
			mutex_unlock(&victim->state_mutex);
			/*
			 * We need to break out of the wait loop in spu_run
			 * manually to ensure this context gets put on the
			 * runqueue again ASAP.
			 */
			wake_up(&victim->stop_wq);
			return spu;
		}
	}

	return NULL;
}

/**
 * spu_activate - find a free spu for a context and execute it
 * @ctx:	spu context to schedule
 * @flags:	flags (currently ignored)
 *
 * Tries to find a free spu to run @ctx.  If no free spu is available
 * add the context to the runqueue so it gets woken up once an spu
 * is available.
 */
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
	do {
		struct spu *spu;

		/*
		 * If there are multiple threads waiting for a single context
		 * only one actually binds the context while the others will
		 * only be able to acquire the state_mutex once the context
		 * already is in runnable state.
		 */
		if (ctx->spu)
			return 0;

		spu = spu_get_idle(ctx);
		/*
		 * If this is a realtime thread we try to get it running by
		 * preempting a lower priority thread.
		 */
		if (!spu && rt_prio(ctx->prio))
			spu = find_victim(ctx);
		if (spu) {
			spu_bind_context(spu, ctx);
			spu_add_to_active_list(spu);
			return 0;
		}

		spu_prio_wait(ctx);
	} while (!signal_pending(current));

	return -ERESTARTSYS;
}

/**
 * grab_runnable_context - try to find a runnable context
 *
 * Remove the highest priority context on the runqueue and return it
 * to the caller.  Returns %NULL if no runnable context was found.
 */
static struct spu_context *grab_runnable_context(int prio, int node)
{
	struct spu_context *ctx;
	int best;

	spin_lock(&spu_prio->runq_lock);
	best = find_first_bit(spu_prio->bitmap, prio);
	while (best < prio) {
		struct list_head *rq = &spu_prio->runq[best];

		list_for_each_entry(ctx, rq, rq) {
			/* XXX(hch): check for affinity here as well */
			if (__node_allowed(ctx, node)) {
				__spu_del_from_rq(ctx);
				goto found;
			}
		}
		best++;
	}
	ctx = NULL;
 found:
	spin_unlock(&spu_prio->runq_lock);
	return ctx;
}
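
/*
 * Worked example (hypothetical priorities, for illustration only): if
 * contexts are queued at priorities 110 and 125 and the caller passes
 * prio == 120, find_first_bit() returns 110 and that context is taken off
 * the runqueue.  If only the priority 125 context were queued, the search
 * limited to the first 120 bits finds nothing and returns 120 (== prio),
 * so the while loop never runs and %NULL is returned.
 */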

static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
{
	struct spu *spu = ctx->spu;
	struct spu_context *new = NULL;

	if (spu) {
		new = grab_runnable_context(max_prio, spu->node);
		if (new || force) {
			spu_remove_from_active_list(spu);
			spu_unbind_context(spu, ctx);
			ctx->stats.vol_ctx_switch++;
			spu->stats.vol_ctx_switch++;
			spu_free(spu);
			if (new)
				wake_up(&new->stop_wq);
		}

	}

	return new != NULL;
}

/**
 * spu_deactivate - unbind a context from its physical spu
 * @ctx:	spu context to unbind
 *
 * Unbind @ctx from the physical spu it is running on and schedule
 * the highest priority context to run on the freed physical spu.
 */
void spu_deactivate(struct spu_context *ctx)
{
	__spu_deactivate(ctx, 1, MAX_PRIO);
}

/**
 * spu_yield -  yield a physical spu if others are waiting
 * @ctx:	spu context to yield
 *
 * Check if there is a higher priority context waiting and if yes
 * unbind @ctx from the physical spu and schedule the highest
 * priority context to run on the freed physical spu instead.
 */
void spu_yield(struct spu_context *ctx)
{
	if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
		mutex_lock(&ctx->state_mutex);
		__spu_deactivate(ctx, 0, MAX_PRIO);
		mutex_unlock(&ctx->state_mutex);
	}
}

static void spusched_tick(struct spu_context *ctx)
{
	if (ctx->flags & SPU_CREATE_NOSCHED)
		return;
	if (ctx->policy == SCHED_FIFO)
		return;

	if (--ctx->time_slice)
		return;

	/*
	 * Unfortunately active_mutex ranks outside of state_mutex, so
	 * we have to trylock here.  If we fail give the context another
	 * tick and try again.
	 */
	if (mutex_trylock(&ctx->state_mutex)) {
		struct spu *spu = ctx->spu;
		struct spu_context *new;

		new = grab_runnable_context(ctx->prio + 1, spu->node);
		if (new) {
			__spu_remove_from_active_list(spu);
			spu_unbind_context(spu, ctx);
			ctx->stats.invol_ctx_switch++;
			spu->stats.invol_ctx_switch++;
			spu_free(spu);
			wake_up(&new->stop_wq);
			/*
			 * We need to break out of the wait loop in
			 * spu_run manually to ensure this context
			 * gets put on the runqueue again ASAP.
			 */
			wake_up(&ctx->stop_wq);
		}
		spu_set_timeslice(ctx);
		mutex_unlock(&ctx->state_mutex);
	} else {
		ctx->time_slice++;
	}
}

/**
 * count_active_contexts - count nr of active tasks
 *
 * Return the number of tasks currently running or waiting to run.
 *
 * Note that we don't take runq_lock / active_mutex here.  Reading
 * a single 32bit value is atomic on powerpc, and we don't care
 * about memory ordering issues here.
 */
static unsigned long count_active_contexts(void)
{
	int nr_active = 0, node;

	for (node = 0; node < MAX_NUMNODES; node++)
		nr_active += spu_prio->nr_active[node];
	nr_active += spu_prio->nr_waiting;

	return nr_active;
}

/**
 * spu_calc_load - given tick count, update the avenrun load estimates.
 * @ticks:	tick count
 *
 * No locking against reading these values from userspace, as for
 * the CPU loadavg code.
 */
static void spu_calc_load(unsigned long ticks)
{
	unsigned long active_tasks; /* fixed-point */
	static int count = LOAD_FREQ;

	count -= ticks;

	if (unlikely(count < 0)) {
		active_tasks = count_active_contexts() * FIXED_1;
		do {
			CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
			CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
			CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
			count += LOAD_FREQ;
		} while (count < 0);
	}
}
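
/*
 * For reference, CALC_LOAD() is the kernel's usual fixed-point decay step
 * (the numbers here assume the generic <linux/sched.h> constants,
 * FSHIFT == 11 and FIXED_1 == 1 << 11):
 *
 *	load = (load * exp + active_tasks * (FIXED_1 - exp)) >> FSHIFT;
 *
 * so with a constant number of active contexts spu_avenrun[] converges
 * exponentially toward count_active_contexts() * FIXED_1, exactly like
 * the CPU load average.
 */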

static void spusched_wake(unsigned long data)
{
	mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	wake_up_process(spusched_task);
	spu_calc_load(SPUSCHED_TICK);
}

static int spusched_thread(void *unused)
{
	struct spu *spu, *next;
	int node;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		for (node = 0; node < MAX_NUMNODES; node++) {
			mutex_lock(&spu_prio->active_mutex[node]);
			list_for_each_entry_safe(spu, next,
						 &spu_prio->active_list[node],
						 list)
				spusched_tick(spu->ctx);
			mutex_unlock(&spu_prio->active_mutex[node]);
		}
	}

	return 0;
}

#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
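
/*
 * Quick worked example (assuming the generic FSHIFT == 11, FIXED_1 == 2048):
 * an avenrun value of 2560 gives 2560 >> 11 == 1 for LOAD_INT and
 * ((2560 & 2047) * 100) >> 11 == 25 for LOAD_FRAC, printed as "1.25".
 */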

static int show_spu_loadavg(struct seq_file *s, void *private)
{
	int a, b, c;

	a = spu_avenrun[0] + (FIXED_1/200);
	b = spu_avenrun[1] + (FIXED_1/200);
	c = spu_avenrun[2] + (FIXED_1/200);

	/*
	 * Note that last_pid doesn't really make much sense for the
	 * SPU loadavg (it even seems very odd on the CPU side..),
	 * but we include it here to have a 100% compatible interface.
	 */
	seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
		LOAD_INT(a), LOAD_FRAC(a),
		LOAD_INT(b), LOAD_FRAC(b),
		LOAD_INT(c), LOAD_FRAC(c),
		count_active_contexts(),
		atomic_read(&nr_spu_contexts),
		current->nsproxy->pid_ns->last_pid);
	return 0;
}
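
/*
 * A sample /proc/spu_loadavg line (made-up numbers) would look like
 * "0.16 0.08 0.01 2/15 4711": the 1/5/15 minute load averages, then
 * running-or-waiting contexts / total contexts, then the last pid.
 */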

static int spu_loadavg_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_spu_loadavg, NULL);
}

static const struct file_operations spu_loadavg_fops = {
	.open		= spu_loadavg_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

int __init spu_sched_init(void)
{
	struct proc_dir_entry *entry;
	int err = -ENOMEM, i;

	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
	if (!spu_prio)
		goto out;

	for (i = 0; i < MAX_PRIO; i++) {
		INIT_LIST_HEAD(&spu_prio->runq[i]);
		__clear_bit(i, spu_prio->bitmap);
	}
	for (i = 0; i < MAX_NUMNODES; i++) {
		mutex_init(&spu_prio->active_mutex[i]);
		INIT_LIST_HEAD(&spu_prio->active_list[i]);
	}
	spin_lock_init(&spu_prio->runq_lock);

	setup_timer(&spusched_timer, spusched_wake, 0);

	spusched_task = kthread_run(spusched_thread, NULL, "spusched");
	if (IS_ERR(spusched_task)) {
		err = PTR_ERR(spusched_task);
		goto out_free_spu_prio;
	}

	entry = create_proc_entry("spu_loadavg", 0, NULL);
	if (!entry)
		goto out_stop_kthread;
	entry->proc_fops = &spu_loadavg_fops;

	pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
			SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
	return 0;

 out_stop_kthread:
	kthread_stop(spusched_task);
 out_free_spu_prio:
	kfree(spu_prio);
 out:
	return err;
}

void spu_sched_exit(void)
{
	struct spu *spu, *tmp;
	int node;

	remove_proc_entry("spu_loadavg", NULL);

	del_timer_sync(&spusched_timer);
	kthread_stop(spusched_task);

	for (node = 0; node < MAX_NUMNODES; node++) {
		mutex_lock(&spu_prio->active_mutex[node]);
		list_for_each_entry_safe(spu, tmp, &spu_prio->active_list[node],
					 list) {
			list_del_init(&spu->list);
			spu_free(spu);
		}
		mutex_unlock(&spu_prio->active_mutex[node]);
	}
	kfree(spu_prio);
}