/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/pid_namespace.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"

struct spu_prio_array {
	DECLARE_BITMAP(bitmap, MAX_PRIO);
	struct list_head runq[MAX_PRIO];
	spinlock_t runq_lock;
	int nr_waiting;
};

static unsigned long spu_avenrun[3];
static struct spu_prio_array *spu_prio;
static struct task_struct *spusched_task;
static struct timer_list spusched_timer;

/*
 * Priority of a normal, non-rt, non-niced process (aka nice level 0).
 */
#define NORMAL_PRIO		120

/*
 * Frequency of the spu scheduler tick.  By default we do one SPU scheduler
 * tick for every 10 CPU scheduler ticks.
 */
#define SPUSCHED_TICK		(10)

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * Minimum timeslice is 5 msecs (or 1 spu scheduler tick, whichever is
 * larger), default timeslice is 100 msecs, maximum timeslice is 800 msecs.
 */
#define MIN_SPU_TIMESLICE	max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
#define DEF_SPU_TIMESLICE	(100 * HZ / (1000 * SPUSCHED_TICK))

#define MAX_USER_PRIO		(MAX_PRIO - MAX_RT_PRIO)
#define SCALE_PRIO(x, prio) \
	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)

/*
 * scale user-nice values [ -20 ... 0 ... 19 ] to time slice values:
 * [800ms ... 100ms ... 5ms]
 *
 * The higher a thread's priority, the bigger timeslices
 * it gets during one round of execution. But even the lowest
 * priority thread gets MIN_SPU_TIMESLICE worth of execution time.
 */
void spu_set_timeslice(struct spu_context *ctx)
{
	if (ctx->prio < NORMAL_PRIO)
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);
	else
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);
}
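
/*
 * For illustration, assuming HZ=1000 and the usual MAX_PRIO=140 /
 * MAX_RT_PRIO=100: one spu scheduler tick is 10 jiffies, so
 * DEF_SPU_TIMESLICE is 10 ticks and MIN_SPU_TIMESLICE is 1 tick.
 * A nice -20 thread (prio 100) then gets SCALE_PRIO(40, 100) = 80
 * ticks (~800ms), a nice 0 thread (prio 120) gets SCALE_PRIO(10, 120)
 * = 10 ticks (~100ms), and a nice 19 thread (prio 139) falls back to
 * the one tick minimum (~10ms).
 */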

/*
 * Update scheduling information from the owning thread.
 */
void __spu_update_sched_info(struct spu_context *ctx)
{
	/*
	 * 32-bit assignments are atomic on powerpc, and we don't care about
	 * memory ordering here because retrieving the controlling thread is
	 * by definition racy.
	 */
	ctx->tid = current->pid;

	/*
	 * We do our own priority calculations, so we normally want
	 * ->static_prio to start with. Unfortunately this field
	 * contains junk for threads with a realtime scheduling
	 * policy, so we have to look at ->prio in this case.
	 */
	if (rt_prio(current->prio))
		ctx->prio = current->prio;
	else
		ctx->prio = current->static_prio;
	ctx->policy = current->policy;

	/*
	 * A lot of places that don't hold list_mutex poke into
	 * cpus_allowed, including grab_runnable_context which
	 * already holds the runq_lock.  So abuse runq_lock
	 * to protect this field as well.
	 */
	spin_lock(&spu_prio->runq_lock);
	ctx->cpus_allowed = current->cpus_allowed;
	spin_unlock(&spu_prio->runq_lock);
}

void spu_update_sched_info(struct spu_context *ctx)
{
	int node = ctx->spu->node;

	mutex_lock(&cbe_spu_info[node].list_mutex);
	__spu_update_sched_info(ctx);
	mutex_unlock(&cbe_spu_info[node].list_mutex);
}

static int __node_allowed(struct spu_context *ctx, int node)
{
	if (nr_cpus_node(node)) {
		cpumask_t mask = node_to_cpumask(node);

		if (cpus_intersects(mask, ctx->cpus_allowed))
			return 1;
	}

	return 0;
}

static int node_allowed(struct spu_context *ctx, int node)
{
	int rval;

	spin_lock(&spu_prio->runq_lock);
	rval = __node_allowed(ctx, node);
	spin_unlock(&spu_prio->runq_lock);

	return rval;
}

static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);

void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
{
	blocking_notifier_call_chain(&spu_switch_notifier,
			    ctx ? ctx->object_id : 0, spu);
}

static void notify_spus_active(void)
{
	int node;

	/*
	 * Wake up the active spu_contexts.
	 *
	 * When the awakened processes see their "notify_active" flag is set,
	 * they will call spu_switch_notify();
	 */
	for_each_online_node(node) {
		struct spu *spu;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state != SPU_FREE) {
				struct spu_context *ctx = spu->ctx;
				set_bit(SPU_SCHED_NOTIFY_ACTIVE,
					&ctx->sched_flags);
				mb();
				wake_up_all(&ctx->stop_wq);
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
}

int spu_switch_event_register(struct notifier_block * n)
{
	int ret;
	ret = blocking_notifier_chain_register(&spu_switch_notifier, n);
	if (!ret)
		notify_spus_active();
	return ret;
}
EXPORT_SYMBOL_GPL(spu_switch_event_register);

int spu_switch_event_unregister(struct notifier_block * n)
{
	return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
}
EXPORT_SYMBOL_GPL(spu_switch_event_unregister);

/**
 * spu_bind_context - bind spu context to physical spu
 * @spu:	physical spu to bind to
 * @ctx:	context to bind
 */
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
		 spu->number, spu->node);
	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (ctx->flags & SPU_CREATE_NOSCHED)
		atomic_inc(&cbe_spu_info[spu->node].reserved_spus);

	ctx->stats.slb_flt_base = spu->stats.slb_flt;
	ctx->stats.class2_intr_base = spu->stats.class2_intr;

	spu->ctx = ctx;
	spu->flags = 0;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu->tgid = current->tgid;
	spu_associate_mm(spu, ctx->owner);
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	spu->dma_callback = spufs_dma_callback;
	mb();
	spu_unmap_mappings(ctx);
	spu_restore(&ctx->csa, spu);
	spu->timestamp = jiffies;
	spu_cpu_affinity_set(spu, raw_smp_processor_id());
	spu_switch_notify(spu, ctx);
	ctx->state = SPU_STATE_RUNNABLE;

	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
}

/*
 * Must be used with the list_mutex held.
 */
static inline int sched_spu(struct spu *spu)
{
	BUG_ON(!mutex_is_locked(&cbe_spu_info[spu->node].list_mutex));

	return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED));
}

static void aff_merge_remaining_ctxs(struct spu_gang *gang)
{
	struct spu_context *ctx;

	list_for_each_entry(ctx, &gang->aff_list_head, aff_list) {
		if (list_empty(&ctx->aff_list))
			list_add(&ctx->aff_list, &gang->aff_list_head);
	}
	gang->aff_flags |= AFF_MERGED;
}

static void aff_set_offsets(struct spu_gang *gang)
{
	struct spu_context *ctx;
	int offset;

	offset = -1;
	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset--;
	}

	offset = 0;
	list_for_each_entry(ctx, gang->aff_ref_ctx->aff_list.prev, aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset++;
	}

	gang->aff_flags |= AFF_OFFSETS_SET;
}

static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
		 int group_size, int lowest_offset)
{
	struct spu *spu;
	int node, n;

	/*
	 * TODO: A better algorithm could be used to find a good spu to be
	 *       used as reference location for the ctxs chain.
	 */
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if ((!mem_aff || spu->has_mem_affinity) &&
							sched_spu(spu)) {
				mutex_unlock(&cbe_spu_info[node].list_mutex);
				return spu;
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	return NULL;
}

static void aff_set_ref_point_location(struct spu_gang *gang)
{
	int mem_aff, gs, lowest_offset;
	struct spu_context *ctx;
	struct spu *tmp;

	mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM;
	lowest_offset = 0;
	gs = 0;

	list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
		gs++;

	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		lowest_offset = ctx->aff_offset;
	}

	gang->aff_ref_spu = aff_ref_location(gang->aff_ref_ctx, mem_aff, gs,
							lowest_offset);
}

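/*
 * ctx_location - walk the affinity list from @ref and return the spu
 * that sits @offset schedulable slots away; negative offsets walk the
 * list backwards.  Called with the node's list_mutex held.
 */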
static struct spu *ctx_location(struct spu *ref, int offset, int node)
{
	struct spu *spu;

	spu = NULL;
	if (offset >= 0) {
		list_for_each_entry(spu, ref->aff_list.prev, aff_list) {
			BUG_ON(spu->node != node);
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset--;
		}
	} else {
		list_for_each_entry_reverse(spu, ref->aff_list.next, aff_list) {
			BUG_ON(spu->node != node);
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset++;
		}
	}

	return spu;
}

/*
 * has_affinity is called each time a context is going to be scheduled.
 * It returns true if the context belongs to an affinity gang, setting up
 * the gang's reference spu location on first use.
 */
static int has_affinity(struct spu_context *ctx)
{
	struct spu_gang *gang = ctx->gang;

	if (list_empty(&ctx->aff_list))
		return 0;

	if (!gang->aff_ref_spu) {
		if (!(gang->aff_flags & AFF_MERGED))
			aff_merge_remaining_ctxs(gang);
		if (!(gang->aff_flags & AFF_OFFSETS_SET))
			aff_set_offsets(gang);
		aff_set_ref_point_location(gang);
	}

	return gang->aff_ref_spu != NULL;
}

/**
 * spu_unbind_context - unbind spu context from physical spu
 * @spu:	physical spu to unbind from
 * @ctx:	context to unbind
 */
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
		 spu->pid, spu->number, spu->node);
	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (spu->ctx->flags & SPU_CREATE_NOSCHED)
		atomic_dec(&cbe_spu_info[spu->node].reserved_spus);

	if (ctx->gang) {
		mutex_lock(&ctx->gang->aff_mutex);
		if (has_affinity(ctx)) {
			if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
				ctx->gang->aff_ref_spu = NULL;
		}
		mutex_unlock(&ctx->gang->aff_mutex);
	}

	spu_switch_notify(spu, NULL);
	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu->dma_callback = NULL;
	spu_associate_mm(spu, NULL);
	spu->pid = 0;
	spu->tgid = 0;
	ctx->ops = &spu_backing_ops;
	spu->flags = 0;
	spu->ctx = NULL;

	ctx->stats.slb_flt +=
		(spu->stats.slb_flt - ctx->stats.slb_flt_base);
	ctx->stats.class2_intr +=
		(spu->stats.class2_intr - ctx->stats.class2_intr_base);

	/* This maps the underlying spu state to idle */
	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
	ctx->spu = NULL;
}

/**
 * spu_add_to_rq - add a context to the runqueue
 * @ctx:       context to add
 */
static void __spu_add_to_rq(struct spu_context *ctx)
{
	/*
	 * Unfortunately this code path can be called from multiple threads
	 * on behalf of a single context due to the way the problem state
	 * mmap support works.
	 *
	 * Fortunately we need to wake up all these threads at the same time
	 * and can simply skip the runqueue addition for all but the first
	 * thread getting into this codepath.
	 *
	 * It's still quite hacky, and long-term we should proxy all other
	 * threads through the owner thread so that spu_run is in control
	 * of all the scheduling activity for a given context.
	 */
	if (list_empty(&ctx->rq)) {
		list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
		set_bit(ctx->prio, spu_prio->bitmap);
		if (!spu_prio->nr_waiting++)
			__mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	}
}

static void __spu_del_from_rq(struct spu_context *ctx)
{
	int prio = ctx->prio;

	if (!list_empty(&ctx->rq)) {
		if (!--spu_prio->nr_waiting)
			del_timer(&spusched_timer);
		list_del_init(&ctx->rq);

		if (list_empty(&spu_prio->runq[prio]))
			clear_bit(prio, spu_prio->bitmap);
	}
}

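/*
 * spu_prio_wait - put the context on the runqueue and sleep until an
 * spu is handed to us or a signal arrives.  Called with ctx->state_mutex
 * held; the mutex is dropped across schedule() and reacquired before
 * returning.
 */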
static void spu_prio_wait(struct spu_context *ctx)
{
	DEFINE_WAIT(wait);

	spin_lock(&spu_prio->runq_lock);
	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
	if (!signal_pending(current)) {
		__spu_add_to_rq(ctx);
		spin_unlock(&spu_prio->runq_lock);
		mutex_unlock(&ctx->state_mutex);
		schedule();
		mutex_lock(&ctx->state_mutex);
		spin_lock(&spu_prio->runq_lock);
		__spu_del_from_rq(ctx);
	}
	spin_unlock(&spu_prio->runq_lock);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->stop_wq, &wait);
}

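/*
 * spu_get_idle - try to find a free spu for @ctx, honouring gang
 * affinity and NUMA placement.  Returns the spu marked SPU_USED with
 * its channels initialized, or NULL if none is free right now.
 */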
static struct spu *spu_get_idle(struct spu_context *ctx)
{
	struct spu *spu, *aff_ref_spu;
	int node, n;

	if (ctx->gang) {
		mutex_lock(&ctx->gang->aff_mutex);
		if (has_affinity(ctx)) {
			aff_ref_spu = ctx->gang->aff_ref_spu;
			atomic_inc(&ctx->gang->aff_sched_count);
			mutex_unlock(&ctx->gang->aff_mutex);
			node = aff_ref_spu->node;

			mutex_lock(&cbe_spu_info[node].list_mutex);
			spu = ctx_location(aff_ref_spu, ctx->aff_offset, node);
			if (spu && spu->alloc_state == SPU_FREE)
				goto found;
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			mutex_lock(&ctx->gang->aff_mutex);
			if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
				ctx->gang->aff_ref_spu = NULL;
			mutex_unlock(&ctx->gang->aff_mutex);

			return NULL;
		}
		mutex_unlock(&ctx->gang->aff_mutex);
	}
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state == SPU_FREE)
				goto found;
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}

	return NULL;

 found:
	spu->alloc_state = SPU_USED;
	mutex_unlock(&cbe_spu_info[node].list_mutex);
	pr_debug("Got SPU %d %d\n", spu->number, spu->node);
	spu_init_channels(spu);
	return spu;
}

/**
 * find_victim - find a lower priority context to preempt
 * @ctx:	candidate context for running
 *
 * Returns the freed physical spu to run the new context on.
 */
static struct spu *find_victim(struct spu_context *ctx)
{
	struct spu_context *victim = NULL;
	struct spu *spu;
	int node, n;

	/*
	 * Look for a possible preemption candidate on the local node first.
	 * If there is no candidate, look at the other nodes.  This isn't
	 * exactly fair, but so far the whole spu scheduler tries to keep
	 * a strong node affinity.  We might want to fine-tune this in
	 * the future.
	 */
 restart:
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			struct spu_context *tmp = spu->ctx;

			if (tmp && tmp->prio > ctx->prio &&
			    (!victim || tmp->prio > victim->prio))
				victim = spu->ctx;
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);

		if (victim) {
			/*
			 * This nests ctx->state_mutex, but we always lock
			 * higher priority contexts before lower priority
			 * ones, so this is safe until we introduce
			 * priority inheritance schemes.
			 */
			if (!mutex_trylock(&victim->state_mutex)) {
				victim = NULL;
				goto restart;
			}

			spu = victim->spu;
			if (!spu) {
				/*
				 * This race can happen because we've dropped
				 * the active list mutex.  Not a problem, just
				 * restart the search.
				 */
				mutex_unlock(&victim->state_mutex);
				victim = NULL;
				goto restart;
			}

			mutex_lock(&cbe_spu_info[node].list_mutex);
			cbe_spu_info[node].nr_active--;
			spu_unbind_context(spu, victim);
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			victim->stats.invol_ctx_switch++;
			spu->stats.invol_ctx_switch++;
			mutex_unlock(&victim->state_mutex);
			/*
			 * We need to break out of the wait loop in spu_run
			 * manually to ensure this context gets put on the
			 * runqueue again ASAP.
			 */
			wake_up(&victim->stop_wq);
			return spu;
		}
	}

	return NULL;
}

/**
 * spu_activate - find a free spu for a context and execute it
 * @ctx:	spu context to schedule
 * @flags:	flags (currently ignored)
 *
 * Tries to find a free spu to run @ctx.  If no free spu is available,
 * add the context to the runqueue so it gets woken up once an spu
 * is available.
 */
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
	do {
		struct spu *spu;

		/*
		 * If there are multiple threads waiting for a single context
		 * only one actually binds the context while the others will
		 * only be able to acquire the state_mutex once the context
		 * already is in runnable state.
		 */
		if (ctx->spu)
			return 0;

		spu = spu_get_idle(ctx);
		/*
		 * If this is a realtime thread we try to get it running by
		 * preempting a lower priority thread.
		 */
		if (!spu && rt_prio(ctx->prio))
			spu = find_victim(ctx);
		if (spu) {
			int node = spu->node;

			mutex_lock(&cbe_spu_info[node].list_mutex);
			spu_bind_context(spu, ctx);
			cbe_spu_info[node].nr_active++;
			mutex_unlock(&cbe_spu_info[node].list_mutex);
			return 0;
		}

		spu_prio_wait(ctx);
	} while (!signal_pending(current));

	return -ERESTARTSYS;
}

/**
 * grab_runnable_context - try to find a runnable context
 *
 * Remove the highest priority context on the runqueue and return it
 * to the caller.  Returns %NULL if no runnable context was found.
 */
static struct spu_context *grab_runnable_context(int prio, int node)
{
	struct spu_context *ctx;
	int best;

	spin_lock(&spu_prio->runq_lock);
	best = find_first_bit(spu_prio->bitmap, prio);
	while (best < prio) {
		struct list_head *rq = &spu_prio->runq[best];

		list_for_each_entry(ctx, rq, rq) {
			/* XXX(hch): check for affinity here as well */
			if (__node_allowed(ctx, node)) {
				__spu_del_from_rq(ctx);
				goto found;
			}
		}
		best++;
	}
	ctx = NULL;
 found:
	spin_unlock(&spu_prio->runq_lock);
	return ctx;
}

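/*
 * __spu_deactivate - unbind @ctx from its spu if a context of better
 * priority than @max_prio is waiting on the runqueue, or unconditionally
 * when @force is set.  The spu is marked free and the waiting context,
 * if any, is woken up to take it over.  Returns non-zero if a waiter
 * was woken.
 */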
static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
{
	struct spu *spu = ctx->spu;
	struct spu_context *new = NULL;

	if (spu) {
		new = grab_runnable_context(max_prio, spu->node);
		if (new || force) {
			int node = spu->node;

			mutex_lock(&cbe_spu_info[node].list_mutex);
			spu_unbind_context(spu, ctx);
			spu->alloc_state = SPU_FREE;
			cbe_spu_info[node].nr_active--;
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			ctx->stats.vol_ctx_switch++;
			spu->stats.vol_ctx_switch++;

			if (new)
				wake_up(&new->stop_wq);
		}

	}

	return new != NULL;
}

/**
 * spu_deactivate - unbind a context from its physical spu
 * @ctx:	spu context to unbind
 *
 * Unbind @ctx from the physical spu it is running on and schedule
 * the highest priority context to run on the freed physical spu.
 */
void spu_deactivate(struct spu_context *ctx)
{
	__spu_deactivate(ctx, 1, MAX_PRIO);
}

/**
 * spu_yield -	yield a physical spu if others are waiting
 * @ctx:	spu context to yield
 *
 * Check if there is a higher priority context waiting and, if so,
 * unbind @ctx from the physical spu and schedule the highest
 * priority context to run on the freed physical spu instead.
 */
void spu_yield(struct spu_context *ctx)
{
	if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
		mutex_lock(&ctx->state_mutex);
		__spu_deactivate(ctx, 0, MAX_PRIO);
		mutex_unlock(&ctx->state_mutex);
	}
}

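/*
 * spusched_tick - charge one timeslice tick to a running context and,
 * once the slice is used up, hand the spu over to a waiting context of
 * equal or higher priority, if there is one.  NOSCHED and SCHED_FIFO
 * contexts are never preempted here.
 */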
static noinline void spusched_tick(struct spu_context *ctx)
{
	if (ctx->flags & SPU_CREATE_NOSCHED)
		return;
	if (ctx->policy == SCHED_FIFO)
		return;

	if (--ctx->time_slice)
		return;

	/*
	 * Unfortunately list_mutex ranks outside of state_mutex, so
	 * we have to trylock here.  If we fail, give the context another
	 * tick and try again.
	 */
	if (mutex_trylock(&ctx->state_mutex)) {
		struct spu *spu = ctx->spu;
		struct spu_context *new;

		new = grab_runnable_context(ctx->prio + 1, spu->node);
		if (new) {
			spu_unbind_context(spu, ctx);
			ctx->stats.invol_ctx_switch++;
			spu->stats.invol_ctx_switch++;
			spu->alloc_state = SPU_FREE;
			cbe_spu_info[spu->node].nr_active--;
			wake_up(&new->stop_wq);
			/*
			 * We need to break out of the wait loop in
			 * spu_run manually to ensure this context
			 * gets put on the runqueue again ASAP.
			 */
			wake_up(&ctx->stop_wq);
		}
		spu_set_timeslice(ctx);
		mutex_unlock(&ctx->state_mutex);
	} else {
		ctx->time_slice++;
	}
}

/**
 * count_active_contexts - count nr of active tasks
 *
 * Return the number of tasks currently running or waiting to run.
 *
 * Note that we don't take runq_lock / list_mutex here.  Reading
 * a single 32bit value is atomic on powerpc, and we don't care
 * about memory ordering issues here.
 */
static unsigned long count_active_contexts(void)
{
	int nr_active = 0, node;

	for (node = 0; node < MAX_NUMNODES; node++)
		nr_active += cbe_spu_info[node].nr_active;
	nr_active += spu_prio->nr_waiting;

	return nr_active;
}

/**
 * spu_calc_load - given tick count, update the avenrun load estimates.
 * @tick:	tick count
 *
 * No locking against reading these values from userspace, as for
 * the CPU loadavg code.
 */
static void spu_calc_load(unsigned long ticks)
{
	unsigned long active_tasks; /* fixed-point */
	static int count = LOAD_FREQ;

	count -= ticks;

	if (unlikely(count < 0)) {
		active_tasks = count_active_contexts() * FIXED_1;
		do {
			CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
			CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
			CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
			count += LOAD_FREQ;
		} while (count < 0);
	}
}

static void spusched_wake(unsigned long data)
{
	mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	wake_up_process(spusched_task);
	spu_calc_load(SPUSCHED_TICK);
}

static int spusched_thread(void *unused)
{
	struct spu *spu;
	int node;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		for (node = 0; node < MAX_NUMNODES; node++) {
			mutex_lock(&cbe_spu_info[node].list_mutex);
			list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
				if (spu->ctx)
					spusched_tick(spu->ctx);
			mutex_unlock(&cbe_spu_info[node].list_mutex);
		}
	}

	return 0;
}

#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
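
/*
 * spu_avenrun[] holds FIXED_1 (1 << FSHIFT) fixed-point load averages,
 * mirroring the CPU loadavg.  LOAD_INT/LOAD_FRAC split such a value
 * into an integer part and a two-digit fraction for printing, e.g. a
 * raw value of 3 * FIXED_1 / 2 is shown as "1.50".
 */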

static int show_spu_loadavg(struct seq_file *s, void *private)
{
	int a, b, c;

	a = spu_avenrun[0] + (FIXED_1/200);
	b = spu_avenrun[1] + (FIXED_1/200);
	c = spu_avenrun[2] + (FIXED_1/200);

	/*
	 * Note that last_pid doesn't really make much sense for the
	 * SPU loadavg (it even seems very odd on the CPU side..),
	 * but we include it here to have a 100% compatible interface.
	 */
	seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
		LOAD_INT(a), LOAD_FRAC(a),
		LOAD_INT(b), LOAD_FRAC(b),
		LOAD_INT(c), LOAD_FRAC(c),
		count_active_contexts(),
		atomic_read(&nr_spu_contexts),
		current->nsproxy->pid_ns->last_pid);
	return 0;
}

static int spu_loadavg_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_spu_loadavg, NULL);
}

static const struct file_operations spu_loadavg_fops = {
	.open		= spu_loadavg_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

int __init spu_sched_init(void)
{
	struct proc_dir_entry *entry;
	int err = -ENOMEM, i;

	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
	if (!spu_prio)
		goto out;

	for (i = 0; i < MAX_PRIO; i++) {
		INIT_LIST_HEAD(&spu_prio->runq[i]);
		__clear_bit(i, spu_prio->bitmap);
	}
	spin_lock_init(&spu_prio->runq_lock);

	setup_timer(&spusched_timer, spusched_wake, 0);

	spusched_task = kthread_run(spusched_thread, NULL, "spusched");
	if (IS_ERR(spusched_task)) {
		err = PTR_ERR(spusched_task);
		goto out_free_spu_prio;
	}

	entry = create_proc_entry("spu_loadavg", 0, NULL);
	if (!entry)
		goto out_stop_kthread;
	entry->proc_fops = &spu_loadavg_fops;

	pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
			SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
	return 0;

 out_stop_kthread:
	kthread_stop(spusched_task);
 out_free_spu_prio:
	kfree(spu_prio);
 out:
	return err;
}

void spu_sched_exit(void)
{
	struct spu *spu;
	int node;

	remove_proc_entry("spu_loadavg", NULL);

	del_timer_sync(&spusched_timer);
	kthread_stop(spusched_task);

	for (node = 0; node < MAX_NUMNODES; node++) {
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
			if (spu->alloc_state != SPU_FREE)
				spu->alloc_state = SPU_FREE;
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	kfree(spu_prio);
}