/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/pid_namespace.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"

struct spu_prio_array {
	DECLARE_BITMAP(bitmap, MAX_PRIO);
	struct list_head runq[MAX_PRIO];
	spinlock_t runq_lock;
	struct list_head active_list[MAX_NUMNODES];
	struct mutex active_mutex[MAX_NUMNODES];
	int nr_active[MAX_NUMNODES];
	int nr_waiting;
};

static unsigned long spu_avenrun[3];
static struct spu_prio_array *spu_prio;
static struct task_struct *spusched_task;
static struct timer_list spusched_timer;

/*
 * Priority of a normal, non-rt, non-niced process (aka nice level 0).
 */
#define NORMAL_PRIO		120

/*
 * Frequency of the spu scheduler tick.  By default we do one SPU scheduler
 * tick for every 10 CPU scheduler ticks.
 */
#define SPUSCHED_TICK		(10)

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * Minimum timeslice is 5 msecs (or 1 spu scheduler tick, whichever is
 * larger), default timeslice is 100 msecs, maximum timeslice is 800 msecs.
 */
#define MIN_SPU_TIMESLICE	max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
#define DEF_SPU_TIMESLICE	(100 * HZ / (1000 * SPUSCHED_TICK))

#define MAX_USER_PRIO		(MAX_PRIO - MAX_RT_PRIO)
#define SCALE_PRIO(x, prio) \
	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)

/*
 * scale user-nice values [ -20 ... 0 ... 19 ] to time slice values:
 * [800ms ... 100ms ... 5ms]
 *
 * The higher a thread's priority, the bigger timeslices
 * it gets during one round of execution. But even the lowest
 * priority thread gets MIN_TIMESLICE worth of execution time.
 */
void spu_set_timeslice(struct spu_context *ctx)
{
	if (ctx->prio < NORMAL_PRIO)
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);
	else
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);
}
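
/*
 * Worked example (illustrative only; assumes HZ=1000 and the usual
 * MAX_PRIO=140 / MAX_RT_PRIO=100, so MAX_USER_PRIO=40 and one spu
 * scheduler tick equals 10 cpu ticks, i.e. 10 msecs):
 *
 *	nice   0 -> prio 120: SCALE_PRIO(DEF_SPU_TIMESLICE, 120)
 *				= max(10 * 20 / 20, 1) = 10 ticks = 100 msecs
 *	nice -20 -> prio 100: SCALE_PRIO(DEF_SPU_TIMESLICE * 4, 100)
 *				= max(40 * 40 / 20, 1) = 80 ticks = 800 msecs
 *	nice +19 -> prio 139: SCALE_PRIO(DEF_SPU_TIMESLICE, 139)
 *				= max(10 * 1 / 20, 1)  =  1 tick  =  10 msecs
 */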

/*
 * Update scheduling information from the owning thread.
 */
void __spu_update_sched_info(struct spu_context *ctx)
{
	/*
	 * 32-bit assignments are atomic on powerpc, and we don't care about
	 * memory ordering here because retrieving the controlling thread is
	 * by definition racy.
	 */
	ctx->tid = current->pid;

	/*
	 * We do our own priority calculations, so we normally want
	 * ->static_prio to start with. Unfortunately this field
	 * contains junk for threads with a realtime scheduling
	 * policy so we have to look at ->prio in this case.
	 */
	if (rt_prio(current->prio))
		ctx->prio = current->prio;
	else
		ctx->prio = current->static_prio;
	ctx->policy = current->policy;

	/*
	 * A lot of places that don't hold active_mutex poke into
	 * cpus_allowed, including grab_runnable_context which
	 * already holds the runq_lock.  So abuse runq_lock
	 * to protect this field as well.
	 */
	spin_lock(&spu_prio->runq_lock);
	ctx->cpus_allowed = current->cpus_allowed;
	spin_unlock(&spu_prio->runq_lock);
}

void spu_update_sched_info(struct spu_context *ctx)
{
	int node = ctx->spu->node;

	mutex_lock(&spu_prio->active_mutex[node]);
	__spu_update_sched_info(ctx);
	mutex_unlock(&spu_prio->active_mutex[node]);
}

static int __node_allowed(struct spu_context *ctx, int node)
{
	if (nr_cpus_node(node)) {
		cpumask_t mask = node_to_cpumask(node);

		if (cpus_intersects(mask, ctx->cpus_allowed))
			return 1;
	}

	return 0;
}

static int node_allowed(struct spu_context *ctx, int node)
{
	int rval;

	spin_lock(&spu_prio->runq_lock);
	rval = __node_allowed(ctx, node);
	spin_unlock(&spu_prio->runq_lock);

	return rval;
}

/**
 * spu_add_to_active_list - add spu to active list
 * @spu:	spu to add to the active list
 */
static void spu_add_to_active_list(struct spu *spu)
{
	int node = spu->node;

	mutex_lock(&spu_prio->active_mutex[node]);
	spu_prio->nr_active[node]++;
	list_add_tail(&spu->list, &spu_prio->active_list[node]);
	mutex_unlock(&spu_prio->active_mutex[node]);
}

static void __spu_remove_from_active_list(struct spu *spu)
{
	list_del_init(&spu->list);
	spu_prio->nr_active[spu->node]--;
}

/**
 * spu_remove_from_active_list - remove spu from active list
 * @spu:       spu to remove from the active list
 */
static void spu_remove_from_active_list(struct spu *spu)
{
	int node = spu->node;

	mutex_lock(&spu_prio->active_mutex[node]);
	__spu_remove_from_active_list(spu);
	mutex_unlock(&spu_prio->active_mutex[node]);
}

static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);

void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
{
	blocking_notifier_call_chain(&spu_switch_notifier,
			    ctx ? ctx->object_id : 0, spu);
}

static void notify_spus_active(void)
{
	int node;

	/*
	 * Wake up the active spu_contexts.
	 *
	 * When the awakened processes see their "notify_active" flag is set,
	 * they will call spu_switch_notify();
	 */
	for_each_online_node(node) {
		struct spu *spu;
		mutex_lock(&spu_prio->active_mutex[node]);
		list_for_each_entry(spu, &spu_prio->active_list[node], list) {
			struct spu_context *ctx = spu->ctx;
			set_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags);
			mb();	/* make sure any tasks woken up below */
				/* can see the bit(s) set above */
			wake_up_all(&ctx->stop_wq);
		}
		mutex_unlock(&spu_prio->active_mutex[node]);
	}
}

int spu_switch_event_register(struct notifier_block * n)
{
	int ret;
	ret = blocking_notifier_chain_register(&spu_switch_notifier, n);
	if (!ret)
		notify_spus_active();
	return ret;
}
EXPORT_SYMBOL_GPL(spu_switch_event_register);

int spu_switch_event_unregister(struct notifier_block * n)
{
	return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
}
EXPORT_SYMBOL_GPL(spu_switch_event_unregister);

/**
 * spu_bind_context - bind spu context to physical spu
 * @spu:	physical spu to bind to
 * @ctx:	context to bind
 */
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
		 spu->number, spu->node);
	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (ctx->flags & SPU_CREATE_NOSCHED)
		atomic_inc(&cbe_spu_info[spu->node].reserved_spus);
	if (!list_empty(&ctx->aff_list))
		atomic_inc(&ctx->gang->aff_sched_count);

	ctx->stats.slb_flt_base = spu->stats.slb_flt;
	ctx->stats.class2_intr_base = spu->stats.class2_intr;

	spu->ctx = ctx;
	spu->flags = 0;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu->tgid = current->tgid;
	spu_associate_mm(spu, ctx->owner);
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	spu->dma_callback = spufs_dma_callback;
	mb();
	spu_unmap_mappings(ctx);
	spu_restore(&ctx->csa, spu);
	spu->timestamp = jiffies;
	spu_cpu_affinity_set(spu, raw_smp_processor_id());
	spu_switch_notify(spu, ctx);
	ctx->state = SPU_STATE_RUNNABLE;

	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
}

/*
 * XXX(hch): needs locking.
 */
static inline int sched_spu(struct spu *spu)
{
	return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED));
}

static void aff_merge_remaining_ctxs(struct spu_gang *gang)
{
	struct spu_context *ctx;

	list_for_each_entry(ctx, &gang->aff_list_head, aff_list) {
		if (list_empty(&ctx->aff_list))
			list_add(&ctx->aff_list, &gang->aff_list_head);
	}
	gang->aff_flags |= AFF_MERGED;
}

static void aff_set_offsets(struct spu_gang *gang)
{
	struct spu_context *ctx;
	int offset;

	offset = -1;
	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset--;
	}

	offset = 0;
	list_for_each_entry(ctx, gang->aff_ref_ctx->aff_list.prev, aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset++;
	}

	gang->aff_flags |= AFF_OFFSETS_SET;
}
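
/*
 * Illustration (hypothetical gang): with contexts A B C D E on the
 * gang's aff_list and C as aff_ref_ctx, aff_set_offsets() assigns
 * aff_offset values -2 -1 0 1 2 respectively; ctx_location() below
 * then walks that many schedulable SPUs away from the reference SPU,
 * in the matching direction, to place each context.
 */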

static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
		 int group_size, int lowest_offset)
{
	struct spu *spu;
	int node, n;

	/*
	 * TODO: A better algorithm could be used to find a good spu to be
	 *       used as reference location for the ctxs chain.
	 */
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if ((!mem_aff || spu->has_mem_affinity) &&
							sched_spu(spu))
				return spu;
		}
	}
	return NULL;
}

static void aff_set_ref_point_location(struct spu_gang *gang)
{
	int mem_aff, gs, lowest_offset;
	struct spu_context *ctx;
	struct spu *tmp;

	mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM;
	lowest_offset = 0;
	gs = 0;

	list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
		gs++;

	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		lowest_offset = ctx->aff_offset;
	}

	gang->aff_ref_spu = aff_ref_location(ctx, mem_aff, gs, lowest_offset);
}

static struct spu *ctx_location(struct spu *ref, int offset)
{
	struct spu *spu;

	spu = NULL;
	if (offset >= 0) {
		list_for_each_entry(spu, ref->aff_list.prev, aff_list) {
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset--;
		}
	} else {
		list_for_each_entry_reverse(spu, ref->aff_list.next, aff_list) {
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset++;
		}
	}
	return spu;
}

/*
 * affinity_check is called each time a context is going to be scheduled.
 * It returns the spu ptr on which the context must run.
 */
struct spu *affinity_check(struct spu_context *ctx)
{
	struct spu_gang *gang;

	if (list_empty(&ctx->aff_list))
		return NULL;
	gang = ctx->gang;
	mutex_lock(&gang->aff_mutex);
	if (!gang->aff_ref_spu) {
		if (!(gang->aff_flags & AFF_MERGED))
			aff_merge_remaining_ctxs(gang);
		if (!(gang->aff_flags & AFF_OFFSETS_SET))
			aff_set_offsets(gang);
		aff_set_ref_point_location(gang);
	}
	mutex_unlock(&gang->aff_mutex);
	if (!gang->aff_ref_spu)
		return NULL;
	return ctx_location(gang->aff_ref_spu, ctx->aff_offset);
}

/**
 * spu_unbind_context - unbind spu context from physical spu
 * @spu:	physical spu to unbind from
 * @ctx:	context to unbind
 */
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
		 spu->pid, spu->number, spu->node);
	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (spu->ctx->flags & SPU_CREATE_NOSCHED)
		atomic_dec(&cbe_spu_info[spu->node].reserved_spus);
	if (!list_empty(&ctx->aff_list))
		if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
			ctx->gang->aff_ref_spu = NULL;
	spu_switch_notify(spu, NULL);
	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu->dma_callback = NULL;
	spu_associate_mm(spu, NULL);
	spu->pid = 0;
	spu->tgid = 0;
	ctx->ops = &spu_backing_ops;
	spu->flags = 0;
	spu->ctx = NULL;

	ctx->stats.slb_flt +=
		(spu->stats.slb_flt - ctx->stats.slb_flt_base);
	ctx->stats.class2_intr +=
		(spu->stats.class2_intr - ctx->stats.class2_intr_base);

	/* This maps the underlying spu state to idle */
	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
	ctx->spu = NULL;
}

/**
 * spu_add_to_rq - add a context to the runqueue
 * @ctx:       context to add
 */
static void __spu_add_to_rq(struct spu_context *ctx)
{
	/*
	 * Unfortunately this code path can be called from multiple threads
	 * on behalf of a single context due to the way the problem state
	 * mmap support works.
	 *
	 * Fortunately we need to wake up all these threads at the same time
	 * and can simply skip the runqueue addition for all but the first
	 * thread getting into this codepath.
	 *
	 * It's still quite hacky, and long-term we should proxy all other
	 * threads through the owner thread so that spu_run is in control
	 * of all the scheduling activity for a given context.
	 */
	if (list_empty(&ctx->rq)) {
		list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
		set_bit(ctx->prio, spu_prio->bitmap);
		if (!spu_prio->nr_waiting++)
			__mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	}
}

static void __spu_del_from_rq(struct spu_context *ctx)
{
	int prio = ctx->prio;

	if (!list_empty(&ctx->rq)) {
		if (!--spu_prio->nr_waiting)
			del_timer(&spusched_timer);
		list_del_init(&ctx->rq);

		if (list_empty(&spu_prio->runq[prio]))
			clear_bit(prio, spu_prio->bitmap);
	}
}

static void spu_prio_wait(struct spu_context *ctx)
{
	DEFINE_WAIT(wait);

	spin_lock(&spu_prio->runq_lock);
	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
	if (!signal_pending(current)) {
		__spu_add_to_rq(ctx);
		spin_unlock(&spu_prio->runq_lock);
		mutex_unlock(&ctx->state_mutex);
		schedule();
		mutex_lock(&ctx->state_mutex);
		spin_lock(&spu_prio->runq_lock);
		__spu_del_from_rq(ctx);
	}
	spin_unlock(&spu_prio->runq_lock);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->stop_wq, &wait);
}

static struct spu *spu_get_idle(struct spu_context *ctx)
{
	struct spu *spu = NULL;
	int node = cpu_to_node(raw_smp_processor_id());
	int n;

	spu = affinity_check(ctx);
	if (spu)
		return spu_alloc_spu(spu);

	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;
		spu = spu_alloc_node(node);
		if (spu)
			break;
	}
	return spu;
}

/**
 * find_victim - find a lower priority context to preempt
 * @ctx:	candidate context for running
 *
 * Returns the freed physical spu to run the new context on.
 */
static struct spu *find_victim(struct spu_context *ctx)
{
	struct spu_context *victim = NULL;
	struct spu *spu;
	int node, n;

	/*
	 * Look for a possible preemption candidate on the local node first.
	 * If there is no candidate look at the other nodes.  This isn't
	 * exactly fair, but so far the whole spu scheduler tries to keep
	 * a strong node affinity.  We might want to fine-tune this in
	 * the future.
	 */
 restart:
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&spu_prio->active_mutex[node]);
		list_for_each_entry(spu, &spu_prio->active_list[node], list) {
			struct spu_context *tmp = spu->ctx;

			if (tmp->prio > ctx->prio &&
			    (!victim || tmp->prio > victim->prio))
				victim = spu->ctx;
		}
		mutex_unlock(&spu_prio->active_mutex[node]);

		if (victim) {
			/*
			 * This nests ctx->state_mutex, but we always lock
			 * higher priority contexts before lower priority
			 * ones, so this is safe until we introduce
			 * priority inheritance schemes.
			 */
			if (!mutex_trylock(&victim->state_mutex)) {
				victim = NULL;
				goto restart;
			}

			spu = victim->spu;
			if (!spu) {
				/*
				 * This race can happen because we've dropped
				 * the active list mutex.  Not a problem, just
				 * restart the search.
				 */
				mutex_unlock(&victim->state_mutex);
				victim = NULL;
				goto restart;
			}
			spu_remove_from_active_list(spu);
			spu_unbind_context(spu, victim);
			victim->stats.invol_ctx_switch++;
			spu->stats.invol_ctx_switch++;
			mutex_unlock(&victim->state_mutex);
			/*
			 * We need to break out of the wait loop in spu_run
			 * manually to ensure this context gets put on the
			 * runqueue again ASAP.
			 */
			wake_up(&victim->stop_wq);
			return spu;
		}
	}

	return NULL;
}

/**
 * spu_activate - find a free spu for a context and execute it
 * @ctx:	spu context to schedule
 * @flags:	flags (currently ignored)
 *
 * Tries to find a free spu to run @ctx.  If no free spu is available
 * add the context to the runqueue so it gets woken up once an spu
 * is available.
 */
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
	do {
		struct spu *spu;

		/*
		 * If there are multiple threads waiting for a single context
		 * only one actually binds the context while the others will
		 * only be able to acquire the state_mutex once the context
		 * already is in runnable state.
		 */
		if (ctx->spu)
			return 0;

		spu = spu_get_idle(ctx);
		/*
		 * If this is a realtime thread we try to get it running by
		 * preempting a lower priority thread.
		 */
		if (!spu && rt_prio(ctx->prio))
			spu = find_victim(ctx);
		if (spu) {
			spu_bind_context(spu, ctx);
			spu_add_to_active_list(spu);
			return 0;
		}

		spu_prio_wait(ctx);
	} while (!signal_pending(current));

	return -ERESTARTSYS;
}

/**
 * grab_runnable_context - try to find a runnable context
 *
 * Remove the highest priority context on the runqueue and return it
 * to the caller.  Returns %NULL if no runnable context was found.
 */
static struct spu_context *grab_runnable_context(int prio, int node)
{
	struct spu_context *ctx;
	int best;

	spin_lock(&spu_prio->runq_lock);
	best = find_first_bit(spu_prio->bitmap, prio);
	while (best < prio) {
		struct list_head *rq = &spu_prio->runq[best];

		list_for_each_entry(ctx, rq, rq) {
			/* XXX(hch): check for affinity here as well */
			if (__node_allowed(ctx, node)) {
				__spu_del_from_rq(ctx);
				goto found;
			}
		}
		best++;
	}
	ctx = NULL;
 found:
	spin_unlock(&spu_prio->runq_lock);
	return ctx;
}
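
/*
 * Example (hypothetical runqueue state): with contexts queued at
 * priorities 110 and 125, grab_runnable_context(120, node) has
 * find_first_bit() stop at bit 110 and hands back the first prio-110
 * context allowed on @node; the prio-125 context is outside the
 * [0, 120) search range and stays queued.
 */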

static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
{
	struct spu *spu = ctx->spu;
	struct spu_context *new = NULL;

	if (spu) {
		new = grab_runnable_context(max_prio, spu->node);
		if (new || force) {
			spu_remove_from_active_list(spu);
			spu_unbind_context(spu, ctx);
			ctx->stats.vol_ctx_switch++;
			spu->stats.vol_ctx_switch++;
			spu_free(spu);
			if (new)
				wake_up(&new->stop_wq);
		}

	}

	return new != NULL;
}

/**
 * spu_deactivate - unbind a context from its physical spu
 * @ctx:	spu context to unbind
 *
 * Unbind @ctx from the physical spu it is running on and schedule
 * the highest priority context to run on the freed physical spu.
 */
void spu_deactivate(struct spu_context *ctx)
{
	__spu_deactivate(ctx, 1, MAX_PRIO);
}

/**
 * spu_yield -	yield a physical spu if others are waiting
 * @ctx:	spu context to yield
 *
 * Check if there is a higher priority context waiting and if yes
 * unbind @ctx from the physical spu and schedule the highest
 * priority context to run on the freed physical spu instead.
 */
void spu_yield(struct spu_context *ctx)
{
	if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
		mutex_lock(&ctx->state_mutex);
		__spu_deactivate(ctx, 0, MAX_PRIO);
		mutex_unlock(&ctx->state_mutex);
	}
}

static void spusched_tick(struct spu_context *ctx)
{
	if (ctx->flags & SPU_CREATE_NOSCHED)
		return;
	if (ctx->policy == SCHED_FIFO)
		return;

	if (--ctx->time_slice)
		return;

	/*
	 * Unfortunately active_mutex ranks outside of state_mutex, so
	 * we have to trylock here.  If we fail give the context another
	 * tick and try again.
	 */
	if (mutex_trylock(&ctx->state_mutex)) {
		struct spu *spu = ctx->spu;
		struct spu_context *new;

		new = grab_runnable_context(ctx->prio + 1, spu->node);
		if (new) {
			__spu_remove_from_active_list(spu);
			spu_unbind_context(spu, ctx);
			ctx->stats.invol_ctx_switch++;
			spu->stats.invol_ctx_switch++;
			spu_free(spu);
			wake_up(&new->stop_wq);
			/*
			 * We need to break out of the wait loop in
			 * spu_run manually to ensure this context
			 * gets put on the runqueue again ASAP.
			 */
			wake_up(&ctx->stop_wq);
		}
		spu_set_timeslice(ctx);
		mutex_unlock(&ctx->state_mutex);
	} else {
		ctx->time_slice++;
	}
}

/**
 * count_active_contexts - count nr of active tasks
 *
 * Return the number of tasks currently running or waiting to run.
 *
 * Note that we don't take runq_lock / active_mutex here.  Reading
 * a single 32bit value is atomic on powerpc, and we don't care
 * about memory ordering issues here.
 */
static unsigned long count_active_contexts(void)
{
	int nr_active = 0, node;

	for (node = 0; node < MAX_NUMNODES; node++)
		nr_active += spu_prio->nr_active[node];
	nr_active += spu_prio->nr_waiting;

	return nr_active;
}

/**
 * spu_calc_load - given tick count, update the avenrun load estimates.
 * @ticks:	tick count
 *
 * No locking against reading these values from userspace, as for
 * the CPU loadavg code.
 */
static void spu_calc_load(unsigned long ticks)
{
	unsigned long active_tasks; /* fixed-point */
	static int count = LOAD_FREQ;

	count -= ticks;

	if (unlikely(count < 0)) {
		active_tasks = count_active_contexts() * FIXED_1;
		do {
			CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
			CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
			CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
			count += LOAD_FREQ;
		} while (count < 0);
	}
}
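
/*
 * For reference: CALC_LOAD(load, exp, n) from <linux/sched.h> updates a
 * fixed-point average as load = (load * exp + n * (FIXED_1 - exp)) >> FSHIFT,
 * so each call decays the old value toward the current task count, with
 * EXP_1 (1 min) decaying fastest and EXP_15 (15 min) slowest, exactly as
 * for the CPU loadavg.
 */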

static void spusched_wake(unsigned long data)
{
	mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	wake_up_process(spusched_task);
	spu_calc_load(SPUSCHED_TICK);
}

static int spusched_thread(void *unused)
{
	struct spu *spu, *next;
	int node;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		for (node = 0; node < MAX_NUMNODES; node++) {
			mutex_lock(&spu_prio->active_mutex[node]);
			list_for_each_entry_safe(spu, next,
						 &spu_prio->active_list[node],
						 list)
				spusched_tick(spu->ctx);
			mutex_unlock(&spu_prio->active_mutex[node]);
		}
	}

	return 0;
}

#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
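
/*
 * Example (with the usual FSHIFT=11, FIXED_1=2048): an avenrun value of
 * 3072 represents 1.5; LOAD_INT(3072) = 3072 >> 11 = 1 and
 * LOAD_FRAC(3072) = ((3072 & 2047) * 100) >> 11 = 50, printed as "1.50".
 * The FIXED_1/200 added below rounds to the nearest hundredth.
 */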

static int show_spu_loadavg(struct seq_file *s, void *private)
{
	int a, b, c;

	a = spu_avenrun[0] + (FIXED_1/200);
	b = spu_avenrun[1] + (FIXED_1/200);
	c = spu_avenrun[2] + (FIXED_1/200);

	/*
	 * Note that last_pid doesn't really make much sense for the
	 * SPU loadavg (it even seems very odd on the CPU side..),
	 * but we include it here to have a 100% compatible interface.
	 */
	seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
		LOAD_INT(a), LOAD_FRAC(a),
		LOAD_INT(b), LOAD_FRAC(b),
		LOAD_INT(c), LOAD_FRAC(c),
		count_active_contexts(),
		atomic_read(&nr_spu_contexts),
		current->nsproxy->pid_ns->last_pid);
	return 0;
}

static int spu_loadavg_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_spu_loadavg, NULL);
}

static const struct file_operations spu_loadavg_fops = {
	.open		= spu_loadavg_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

int __init spu_sched_init(void)
{
	struct proc_dir_entry *entry;
	int err = -ENOMEM, i;

	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
	if (!spu_prio)
		goto out;

	for (i = 0; i < MAX_PRIO; i++) {
		INIT_LIST_HEAD(&spu_prio->runq[i]);
		__clear_bit(i, spu_prio->bitmap);
	}
	for (i = 0; i < MAX_NUMNODES; i++) {
		mutex_init(&spu_prio->active_mutex[i]);
		INIT_LIST_HEAD(&spu_prio->active_list[i]);
	}
	spin_lock_init(&spu_prio->runq_lock);

	setup_timer(&spusched_timer, spusched_wake, 0);

	spusched_task = kthread_run(spusched_thread, NULL, "spusched");
	if (IS_ERR(spusched_task)) {
		err = PTR_ERR(spusched_task);
		goto out_free_spu_prio;
	}

	entry = create_proc_entry("spu_loadavg", 0, NULL);
	if (!entry)
		goto out_stop_kthread;
	entry->proc_fops = &spu_loadavg_fops;

	pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
			SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
	return 0;

 out_stop_kthread:
	kthread_stop(spusched_task);
 out_free_spu_prio:
	kfree(spu_prio);
 out:
	return err;
}

void spu_sched_exit(void)
{
	struct spu *spu, *tmp;
	int node;

	remove_proc_entry("spu_loadavg", NULL);

	del_timer_sync(&spusched_timer);
	kthread_stop(spusched_task);

	for (node = 0; node < MAX_NUMNODES; node++) {
		mutex_lock(&spu_prio->active_mutex[node]);
		list_for_each_entry_safe(spu, tmp, &spu_prio->active_list[node],
					 list) {
			list_del_init(&spu->list);
			spu_free(spu);
		}
		mutex_unlock(&spu_prio->active_mutex[node]);
	}
	kfree(spu_prio);
}