/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/pid_namespace.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"

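/*
 * The runqueue is one list per priority level plus a bitmap of the
 * non-empty levels, so the highest priority waiting context can be
 * found with a single find_first_bit() (see grab_runnable_context()).
 */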
struct spu_prio_array {
	DECLARE_BITMAP(bitmap, MAX_PRIO);
	struct list_head runq[MAX_PRIO];
	spinlock_t runq_lock;
	int nr_waiting;
};

static unsigned long spu_avenrun[3];
static struct spu_prio_array *spu_prio;
static struct task_struct *spusched_task;
static struct timer_list spusched_timer;

/*
 * Priority of a normal, non-rt, non-niced process (aka nice level 0).
 */
#define NORMAL_PRIO		120

/*
 * Frequency of the spu scheduler tick.  By default we do one SPU scheduler
 * tick for every 10 CPU scheduler ticks.
 */
#define SPUSCHED_TICK		(10)

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * Minimum timeslice is 5 msecs (or 1 spu scheduler tick, whichever is
 * larger), default timeslice is 100 msecs, maximum timeslice is 800 msecs.
 */
#define MIN_SPU_TIMESLICE	max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
#define DEF_SPU_TIMESLICE	(100 * HZ / (1000 * SPUSCHED_TICK))

#define MAX_USER_PRIO		(MAX_PRIO - MAX_RT_PRIO)
#define SCALE_PRIO(x, prio) \
	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)
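
/*
 * With MAX_PRIO at 140 and MAX_RT_PRIO at 100 (so MAX_USER_PRIO == 40)
 * this works out as follows: nice -20 (prio 100) gets
 * SCALE_PRIO(DEF_SPU_TIMESLICE * 4, 100) == 800 msecs, nice 0 (prio 120)
 * gets 100 msecs, and nice 19 (prio 139) bottoms out at the 5 msec
 * minimum.
 */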

/*
 * scale user-nice values [ -20 ... 0 ... 19 ] to time slice values:
 * [800ms ... 100ms ... 5ms]
 *
 * The higher a thread's priority, the bigger timeslices
 * it gets during one round of execution. But even the lowest
 * priority thread gets MIN_SPU_TIMESLICE worth of execution time.
 */
void spu_set_timeslice(struct spu_context *ctx)
{
	if (ctx->prio < NORMAL_PRIO)
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);
	else
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);
}

/*
 * Update scheduling information from the owning thread.
 */
void __spu_update_sched_info(struct spu_context *ctx)
{
	/*
	 * 32-Bit assignments are atomic on powerpc, and we don't care about
	 * memory ordering here because retrieving the controlling thread is
	 * by definition racy.
	 */
	ctx->tid = current->pid;

	/*
	 * We do our own priority calculations, so we normally want
	 * ->static_prio to start with. Unfortunately this field
	 * contains junk for threads with a realtime scheduling
	 * policy so we have to look at ->prio in this case.
	 */
	if (rt_prio(current->prio))
		ctx->prio = current->prio;
	else
		ctx->prio = current->static_prio;
	ctx->policy = current->policy;

	/*
	 * A lot of places that don't hold list_mutex poke into
	 * cpus_allowed, including grab_runnable_context which
	 * already holds the runq_lock.  So abuse runq_lock
	 * to protect this field as well.
	 */
	spin_lock(&spu_prio->runq_lock);
	ctx->cpus_allowed = current->cpus_allowed;
	spin_unlock(&spu_prio->runq_lock);
}

void spu_update_sched_info(struct spu_context *ctx)
{
	int node = ctx->spu->node;

	mutex_lock(&cbe_spu_info[node].list_mutex);
	__spu_update_sched_info(ctx);
	mutex_unlock(&cbe_spu_info[node].list_mutex);
}

static int __node_allowed(struct spu_context *ctx, int node)
{
	if (nr_cpus_node(node)) {
		cpumask_t mask = node_to_cpumask(node);

		if (cpus_intersects(mask, ctx->cpus_allowed))
			return 1;
	}

	return 0;
}

static int node_allowed(struct spu_context *ctx, int node)
{
	int rval;

	spin_lock(&spu_prio->runq_lock);
	rval = __node_allowed(ctx, node);
	spin_unlock(&spu_prio->runq_lock);

	return rval;
}

static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);

void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
{
	blocking_notifier_call_chain(&spu_switch_notifier,
			    ctx ? ctx->object_id : 0, spu);
}

static void notify_spus_active(void)
{
	int node;

	/*
	 * Wake up the active spu_contexts.
	 *
	 * When the awakened processes see their "notify_active" flag is set,
	 * they will call spu_switch_notify().
	 */
	for_each_online_node(node) {
		struct spu *spu;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state != SPU_FREE) {
				struct spu_context *ctx = spu->ctx;
				set_bit(SPU_SCHED_NOTIFY_ACTIVE,
					&ctx->sched_flags);
				mb();
				wake_up_all(&ctx->stop_wq);
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
}

int spu_switch_event_register(struct notifier_block * n)
{
	int ret;
	ret = blocking_notifier_chain_register(&spu_switch_notifier, n);
	if (!ret)
		notify_spus_active();
	return ret;
}
EXPORT_SYMBOL_GPL(spu_switch_event_register);

int spu_switch_event_unregister(struct notifier_block * n)
{
	return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
}
EXPORT_SYMBOL_GPL(spu_switch_event_unregister);

/**
 * spu_bind_context - bind spu context to physical spu
 * @spu:	physical spu to bind to
 * @ctx:	context to bind
 */
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
		 spu->number, spu->node);
	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (ctx->flags & SPU_CREATE_NOSCHED)
		atomic_inc(&cbe_spu_info[spu->node].reserved_spus);

	ctx->stats.slb_flt_base = spu->stats.slb_flt;
	ctx->stats.class2_intr_base = spu->stats.class2_intr;

	spu->ctx = ctx;
	spu->flags = 0;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu->tgid = current->tgid;
	spu_associate_mm(spu, ctx->owner);
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	mb();
	spu_unmap_mappings(ctx);
	spu_restore(&ctx->csa, spu);
	spu->timestamp = jiffies;
	spu_cpu_affinity_set(spu, raw_smp_processor_id());
	spu_switch_notify(spu, ctx);
	ctx->state = SPU_STATE_RUNNABLE;

	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
}

/*
 * Must be used with the list_mutex held.
 */
static inline int sched_spu(struct spu *spu)
{
	BUG_ON(!mutex_is_locked(&cbe_spu_info[spu->node].list_mutex));

	return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED));
}

static void aff_merge_remaining_ctxs(struct spu_gang *gang)
{
	struct spu_context *ctx;

	list_for_each_entry(ctx, &gang->aff_list_head, aff_list) {
		if (list_empty(&ctx->aff_list))
			list_add(&ctx->aff_list, &gang->aff_list_head);
	}
	gang->aff_flags |= AFF_MERGED;
}

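/*
 * Number each context in the gang relative to the reference context:
 * contexts ahead of the reference on the affinity list get negative
 * offsets, those behind it get increasing positive ones.  ctx_location()
 * later walks that many schedulable SPUs away from the reference spu.
 */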
static void aff_set_offsets(struct spu_gang *gang)
{
	struct spu_context *ctx;
	int offset;

	offset = -1;
	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset--;
	}

	offset = 0;
	list_for_each_entry(ctx, gang->aff_ref_ctx->aff_list.prev, aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset++;
	}

	gang->aff_flags |= AFF_OFFSETS_SET;
}

static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
		 int group_size, int lowest_offset)
{
	struct spu *spu;
	int node, n;

	/*
	 * TODO: A better algorithm could be used to find a good spu to be
	 *       used as reference location for the ctxs chain.
	 */
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if ((!mem_aff || spu->has_mem_affinity) &&
							sched_spu(spu)) {
				mutex_unlock(&cbe_spu_info[node].list_mutex);
				return spu;
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	return NULL;
}

static void aff_set_ref_point_location(struct spu_gang *gang)
{
	int mem_aff, gs, lowest_offset;
	struct spu_context *ctx;
	struct spu *tmp;

	mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM;
	lowest_offset = 0;
	gs = 0;

	list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
		gs++;

	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		lowest_offset = ctx->aff_offset;
	}

	gang->aff_ref_spu = aff_ref_location(gang->aff_ref_ctx, mem_aff, gs,
							lowest_offset);
}

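/*
 * Walk |offset| schedulable SPUs along the hardware affinity list,
 * forwards for a positive offset and backwards for a negative one,
 * starting from the gang's reference spu.
 */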
static struct spu *ctx_location(struct spu *ref, int offset, int node)
{
	struct spu *spu;

	spu = NULL;
	if (offset >= 0) {
		list_for_each_entry(spu, ref->aff_list.prev, aff_list) {
			BUG_ON(spu->node != node);
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset--;
		}
	} else {
		list_for_each_entry_reverse(spu, ref->aff_list.next, aff_list) {
			BUG_ON(spu->node != node);
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset++;
		}
	}

	return spu;
}

/*
 * has_affinity is called each time a context is going to be scheduled.
 * It returns true if the context belongs to a gang with spu affinity,
 * setting up the gang's reference spu first if that hasn't happened yet.
 */
static int has_affinity(struct spu_context *ctx)
{
	struct spu_gang *gang = ctx->gang;

	if (list_empty(&ctx->aff_list))
		return 0;

	if (!gang->aff_ref_spu) {
		if (!(gang->aff_flags & AFF_MERGED))
			aff_merge_remaining_ctxs(gang);
		if (!(gang->aff_flags & AFF_OFFSETS_SET))
			aff_set_offsets(gang);
		aff_set_ref_point_location(gang);
	}

	return gang->aff_ref_spu != NULL;
}

/**
 * spu_unbind_context - unbind spu context from physical spu
 * @spu:	physical spu to unbind from
 * @ctx:	context to unbind
 */
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
		 spu->pid, spu->number, spu->node);
	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (spu->ctx->flags & SPU_CREATE_NOSCHED)
		atomic_dec(&cbe_spu_info[spu->node].reserved_spus);

	if (ctx->gang) {
		mutex_lock(&ctx->gang->aff_mutex);
		if (has_affinity(ctx)) {
			if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
				ctx->gang->aff_ref_spu = NULL;
		}
		mutex_unlock(&ctx->gang->aff_mutex);
	}

	spu_switch_notify(spu, NULL);
	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu_associate_mm(spu, NULL);
	spu->pid = 0;
	spu->tgid = 0;
	ctx->ops = &spu_backing_ops;
	spu->flags = 0;
	spu->ctx = NULL;

	ctx->stats.slb_flt +=
		(spu->stats.slb_flt - ctx->stats.slb_flt_base);
	ctx->stats.class2_intr +=
		(spu->stats.class2_intr - ctx->stats.class2_intr_base);

	/* This maps the underlying spu state to idle */
	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
	ctx->spu = NULL;
}

/**
 * __spu_add_to_rq - add a context to the runqueue
 * @ctx:       context to add
 */
static void __spu_add_to_rq(struct spu_context *ctx)
{
	/*
	 * Unfortunately this code path can be called from multiple threads
	 * on behalf of a single context due to the way the problem state
	 * mmap support works.
	 *
	 * Fortunately we need to wake up all these threads at the same time
	 * and can simply skip the runqueue addition for all but the first
	 * thread getting into this codepath.
	 *
	 * It's still quite hacky, and long-term we should proxy all other
	 * threads through the owner thread so that spu_run is in control
	 * of all the scheduling activity for a given context.
	 */
	if (list_empty(&ctx->rq)) {
		list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
		set_bit(ctx->prio, spu_prio->bitmap);
		if (!spu_prio->nr_waiting++)
			__mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	}
}

static void __spu_del_from_rq(struct spu_context *ctx)
{
	int prio = ctx->prio;

	if (!list_empty(&ctx->rq)) {
		if (!--spu_prio->nr_waiting)
			del_timer(&spusched_timer);
		list_del_init(&ctx->rq);

		if (list_empty(&spu_prio->runq[prio]))
			clear_bit(prio, spu_prio->bitmap);
	}
}

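/**
 * spu_prio_wait - sleep until a spu may be available
 * @ctx:	spu context to wait on
 *
 * Put the context on the runqueue and sleep on its stop_wq until it is
 * woken by a freed spu.  ctx->state_mutex is held on entry and dropped
 * across schedule() so the context can be manipulated in the meantime.
 */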
static void spu_prio_wait(struct spu_context *ctx)
{
	DEFINE_WAIT(wait);

	spin_lock(&spu_prio->runq_lock);
	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
	if (!signal_pending(current)) {
		__spu_add_to_rq(ctx);
		spin_unlock(&spu_prio->runq_lock);
		mutex_unlock(&ctx->state_mutex);
		schedule();
		mutex_lock(&ctx->state_mutex);
		spin_lock(&spu_prio->runq_lock);
		__spu_del_from_rq(ctx);
	}
	spin_unlock(&spu_prio->runq_lock);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->stop_wq, &wait);
}

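/**
 * spu_get_idle - find an idle spu for this context
 * @ctx:	spu context to schedule
 *
 * Honours gang affinity placement if one is set up and returns a free
 * spu marked SPU_USED, or %NULL if none is available.
 */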
static struct spu *spu_get_idle(struct spu_context *ctx)
{
	struct spu *spu, *aff_ref_spu;
	int node, n;

	if (ctx->gang) {
		mutex_lock(&ctx->gang->aff_mutex);
		if (has_affinity(ctx)) {
			aff_ref_spu = ctx->gang->aff_ref_spu;
			atomic_inc(&ctx->gang->aff_sched_count);
			mutex_unlock(&ctx->gang->aff_mutex);
			node = aff_ref_spu->node;

			mutex_lock(&cbe_spu_info[node].list_mutex);
			spu = ctx_location(aff_ref_spu, ctx->aff_offset, node);
			if (spu && spu->alloc_state == SPU_FREE)
				goto found;
			mutex_unlock(&cbe_spu_info[node].list_mutex);
			mutex_lock(&ctx->gang->aff_mutex);
			if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
				ctx->gang->aff_ref_spu = NULL;
			mutex_unlock(&ctx->gang->aff_mutex);
			return NULL;
		}
		mutex_unlock(&ctx->gang->aff_mutex);
	}
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state == SPU_FREE)
				goto found;
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}

	return NULL;

 found:
	spu->alloc_state = SPU_USED;
	mutex_unlock(&cbe_spu_info[node].list_mutex);
	pr_debug("Got SPU %d %d\n", spu->number, spu->node);
	spu_init_channels(spu);
	return spu;
}

/**
 * find_victim - find a lower priority context to preempt
 * @ctx:	candidate context for running
 *
 * Returns the freed physical spu to run the new context on.
 */
static struct spu *find_victim(struct spu_context *ctx)
{
	struct spu_context *victim = NULL;
	struct spu *spu;
	int node, n;

	/*
	 * Look for a possible preemption candidate on the local node first.
	 * If there is no candidate look at the other nodes.  This isn't
	 * exactly fair, but so far the whole spu scheduler tries to keep
	 * a strong node affinity.  We might want to fine-tune this in
	 * the future.
	 */
 restart:
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			struct spu_context *tmp = spu->ctx;

			if (tmp && tmp->prio > ctx->prio &&
			    (!victim || tmp->prio > victim->prio))
				victim = spu->ctx;
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);

		if (victim) {
			/*
			 * This nests ctx->state_mutex, but we always lock
			 * higher priority contexts before lower priority
			 * ones, so this is safe until we introduce
			 * priority inheritance schemes.
			 */
			if (!mutex_trylock(&victim->state_mutex)) {
				victim = NULL;
				goto restart;
			}

			spu = victim->spu;
			if (!spu) {
				/*
				 * This race can happen because we've dropped
				 * the active list mutex.  Not a problem, just
				 * restart the search.
				 */
				mutex_unlock(&victim->state_mutex);
				victim = NULL;
				goto restart;
			}

			mutex_lock(&cbe_spu_info[node].list_mutex);
			cbe_spu_info[node].nr_active--;
			spu_unbind_context(spu, victim);
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			victim->stats.invol_ctx_switch++;
			spu->stats.invol_ctx_switch++;
			mutex_unlock(&victim->state_mutex);
			/*
			 * We need to break out of the wait loop in spu_run
			 * manually to ensure this context gets put on the
			 * runqueue again ASAP.
			 */
			wake_up(&victim->stop_wq);
			return spu;
		}
	}

	return NULL;
}

/**
 * spu_activate - find a free spu for a context and execute it
 * @ctx:	spu context to schedule
 * @flags:	flags (currently ignored)
 *
 * Tries to find a free spu to run @ctx.  If no free spu is available
 * add the context to the runqueue so it gets woken up once an spu
 * is available.
 */
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
	do {
		struct spu *spu;

		/*
		 * If there are multiple threads waiting for a single context
		 * only one actually binds the context while the others will
		 * only be able to acquire the state_mutex once the context
		 * already is in runnable state.
		 */
		if (ctx->spu)
			return 0;

		spu = spu_get_idle(ctx);
		/*
		 * If this is a realtime thread we try to get it running by
		 * preempting a lower priority thread.
		 */
		if (!spu && rt_prio(ctx->prio))
			spu = find_victim(ctx);
		if (spu) {
			int node = spu->node;

			mutex_lock(&cbe_spu_info[node].list_mutex);
			spu_bind_context(spu, ctx);
			cbe_spu_info[node].nr_active++;
			mutex_unlock(&cbe_spu_info[node].list_mutex);
			wake_up_all(&ctx->run_wq);
			return 0;
		}

		spu_prio_wait(ctx);
	} while (!signal_pending(current));

	return -ERESTARTSYS;
}

/**
 * grab_runnable_context - try to find a runnable context
 * @prio:	only consider contexts with a priority number below this value
 * @node:	node the freed spu sits on; contexts not allowed to run
 *		there are skipped
 *
 * Remove the highest priority context on the runqueue and return it
 * to the caller.  Returns %NULL if no runnable context was found.
 */
static struct spu_context *grab_runnable_context(int prio, int node)
{
	struct spu_context *ctx;
	int best;

	spin_lock(&spu_prio->runq_lock);
	best = find_first_bit(spu_prio->bitmap, prio);
	while (best < prio) {
		struct list_head *rq = &spu_prio->runq[best];

		list_for_each_entry(ctx, rq, rq) {
			/* XXX(hch): check for affinity here as well */
			if (__node_allowed(ctx, node)) {
				__spu_del_from_rq(ctx);
				goto found;
			}
		}
		best++;
	}
	ctx = NULL;
 found:
	spin_unlock(&spu_prio->runq_lock);
	return ctx;
}

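/**
 * __spu_deactivate - unbind a context from its spu, maybe handing it on
 * @ctx:	spu context to unbind
 * @force:	also unbind when no runnable context is waiting
 * @max_prio:	only pick up waiting contexts with a priority number
 *		below this value
 *
 * Returns non-zero if the spu was handed to another context.
 */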
static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
{
	struct spu *spu = ctx->spu;
	struct spu_context *new = NULL;

	if (spu) {
		new = grab_runnable_context(max_prio, spu->node);
		if (new || force) {
			int node = spu->node;

			mutex_lock(&cbe_spu_info[node].list_mutex);
			spu_unbind_context(spu, ctx);
			spu->alloc_state = SPU_FREE;
			cbe_spu_info[node].nr_active--;
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			ctx->stats.vol_ctx_switch++;
			spu->stats.vol_ctx_switch++;

			if (new)
				wake_up(&new->stop_wq);
		}

	}

	return new != NULL;
}

/**
 * spu_deactivate - unbind a context from its physical spu
 * @ctx:	spu context to unbind
 *
 * Unbind @ctx from the physical spu it is running on and schedule
 * the highest priority context to run on the freed physical spu.
 */
void spu_deactivate(struct spu_context *ctx)
{
	__spu_deactivate(ctx, 1, MAX_PRIO);
}

/**
 * spu_yield -	yield a physical spu if others are waiting
 * @ctx:	spu context to yield
 *
 * Check if there is a higher priority context waiting and if yes
 * unbind @ctx from the physical spu and schedule the highest
 * priority context to run on the freed physical spu instead.
 */
void spu_yield(struct spu_context *ctx)
{
	if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
		mutex_lock(&ctx->state_mutex);
		__spu_deactivate(ctx, 0, MAX_PRIO);
		mutex_unlock(&ctx->state_mutex);
	}
}

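/*
 * Called by the scheduler thread for every running context once per
 * SPUSCHED_TICK.  When the time slice is used up the context is
 * preempted, but only if a context of equal or higher priority is
 * waiting on the runqueue (note the ctx->prio + 1 below).
 */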
static noinline void spusched_tick(struct spu_context *ctx)
{
	if (ctx->flags & SPU_CREATE_NOSCHED)
		return;
	if (ctx->policy == SCHED_FIFO)
		return;

	if (--ctx->time_slice)
		return;

	/*
	 * Unfortunately list_mutex ranks outside of state_mutex, so
	 * we have to trylock here.  If we fail give the context another
	 * tick and try again.
	 */
	if (mutex_trylock(&ctx->state_mutex)) {
		struct spu *spu = ctx->spu;
		struct spu_context *new;

		new = grab_runnable_context(ctx->prio + 1, spu->node);
		if (new) {
			spu_unbind_context(spu, ctx);
			ctx->stats.invol_ctx_switch++;
			spu->stats.invol_ctx_switch++;
			spu->alloc_state = SPU_FREE;
			cbe_spu_info[spu->node].nr_active--;
			wake_up(&new->stop_wq);
			/*
			 * We need to break out of the wait loop in
			 * spu_run manually to ensure this context
			 * gets put on the runqueue again ASAP.
			 */
			wake_up(&ctx->stop_wq);
		}
		spu_set_timeslice(ctx);
		mutex_unlock(&ctx->state_mutex);
	} else {
		ctx->time_slice++;
	}
}

/**
 * count_active_contexts - count nr of active tasks
 *
 * Return the number of tasks currently running or waiting to run.
 *
 * Note that we don't take runq_lock / list_mutex here.  Reading
 * a single 32bit value is atomic on powerpc, and we don't care
 * about memory ordering issues here.
 */
static unsigned long count_active_contexts(void)
{
	int nr_active = 0, node;

	for (node = 0; node < MAX_NUMNODES; node++)
		nr_active += cbe_spu_info[node].nr_active;
	nr_active += spu_prio->nr_waiting;

	return nr_active;
}

/**
 * spu_calc_load - given tick count, update the avenrun load estimates.
 * @ticks:	tick count
 *
 * No locking against reading these values from userspace, as for
 * the CPU loadavg code.
 */
static void spu_calc_load(unsigned long ticks)
{
	unsigned long active_tasks; /* fixed-point */
	static int count = LOAD_FREQ;

	count -= ticks;

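	/*
	 * Fold the number of runnable contexts into the 1/5/15 minute
	 * averages once every LOAD_FREQ ticks, using the same fixed-point
	 * exponential decay as the CPU loadavg:
	 * avenrun = avenrun * exp / FIXED_1 + active * (FIXED_1 - exp) / FIXED_1
	 */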
	if (unlikely(count < 0)) {
		active_tasks = count_active_contexts() * FIXED_1;
		do {
			CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
			CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
			CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
			count += LOAD_FREQ;
		} while (count < 0);
	}
}

static void spusched_wake(unsigned long data)
{
	mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	wake_up_process(spusched_task);
	spu_calc_load(SPUSCHED_TICK);
}

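/*
 * The scheduler thread: woken once per spu scheduler tick by
 * spusched_wake() and runs spusched_tick() on every context that
 * currently owns a spu.
 */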
static int spusched_thread(void *unused)
{
	struct spu *spu;
	int node;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		for (node = 0; node < MAX_NUMNODES; node++) {
			mutex_lock(&cbe_spu_info[node].list_mutex);
			list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
				if (spu->ctx)
					spusched_tick(spu->ctx);
			mutex_unlock(&cbe_spu_info[node].list_mutex);
		}
	}

	return 0;
}

void spuctx_switch_state(struct spu_context *ctx,
		enum spu_utilization_state new_state)
{
	unsigned long long curtime;
	signed long long delta;
	struct timespec ts;
	struct spu *spu;
	enum spu_utilization_state old_state;

	ktime_get_ts(&ts);
	curtime = timespec_to_ns(&ts);
	delta = curtime - ctx->stats.tstamp;

	WARN_ON(!mutex_is_locked(&ctx->state_mutex));
	WARN_ON(delta < 0);

	spu = ctx->spu;
	old_state = ctx->stats.util_state;
	ctx->stats.util_state = new_state;
	ctx->stats.tstamp = curtime;

	/*
	 * Update the physical SPU utilization statistics.
	 */
	if (spu) {
		ctx->stats.times[old_state] += delta;
		spu->stats.times[old_state] += delta;
		spu->stats.util_state = new_state;
		spu->stats.tstamp = curtime;
	}
}

#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
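
/*
 * Example: an average of one and a half runnable contexts is stored as
 * 3 * FIXED_1 / 2; LOAD_INT() yields 1 and LOAD_FRAC() yields 50, which
 * prints as "1.50".  The FIXED_1/200 added below rounds to the nearest
 * 1/100.
 */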

static int show_spu_loadavg(struct seq_file *s, void *private)
{
	int a, b, c;

	a = spu_avenrun[0] + (FIXED_1/200);
	b = spu_avenrun[1] + (FIXED_1/200);
	c = spu_avenrun[2] + (FIXED_1/200);

	/*
	 * Note that last_pid doesn't really make much sense for the
	 * SPU loadavg (it even seems very odd on the CPU side...),
	 * but we include it here to have a 100% compatible interface.
	 */
	seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
		LOAD_INT(a), LOAD_FRAC(a),
		LOAD_INT(b), LOAD_FRAC(b),
		LOAD_INT(c), LOAD_FRAC(c),
		count_active_contexts(),
		atomic_read(&nr_spu_contexts),
		current->nsproxy->pid_ns->last_pid);
	return 0;
}

static int spu_loadavg_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_spu_loadavg, NULL);
}

static const struct file_operations spu_loadavg_fops = {
	.open		= spu_loadavg_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

int __init spu_sched_init(void)
{
	struct proc_dir_entry *entry;
	int err = -ENOMEM, i;

	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
	if (!spu_prio)
		goto out;

	for (i = 0; i < MAX_PRIO; i++) {
		INIT_LIST_HEAD(&spu_prio->runq[i]);
		__clear_bit(i, spu_prio->bitmap);
	}
	spin_lock_init(&spu_prio->runq_lock);

	setup_timer(&spusched_timer, spusched_wake, 0);

	spusched_task = kthread_run(spusched_thread, NULL, "spusched");
	if (IS_ERR(spusched_task)) {
		err = PTR_ERR(spusched_task);
		goto out_free_spu_prio;
	}

	entry = create_proc_entry("spu_loadavg", 0, NULL);
	if (!entry)
		goto out_stop_kthread;
	entry->proc_fops = &spu_loadavg_fops;

	pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
			SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
	return 0;

 out_stop_kthread:
	kthread_stop(spusched_task);
 out_free_spu_prio:
	kfree(spu_prio);
 out:
	return err;
}

void spu_sched_exit(void)
{
	struct spu *spu;
	int node;

	remove_proc_entry("spu_loadavg", NULL);

	del_timer_sync(&spusched_timer);
	kthread_stop(spusched_task);

	for (node = 0; node < MAX_NUMNODES; node++) {
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
			if (spu->alloc_state != SPU_FREE)
				spu->alloc_state = SPU_FREE;
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	kfree(spu_prio);
}