/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"

#define SPU_TIMESLICE	(HZ)

struct spu_prio_array {
	DECLARE_BITMAP(bitmap, MAX_PRIO);
	struct list_head runq[MAX_PRIO];
	spinlock_t runq_lock;
	struct list_head active_list[MAX_NUMNODES];
	struct mutex active_mutex[MAX_NUMNODES];
};

static struct spu_prio_array *spu_prio;
static struct workqueue_struct *spu_sched_wq;

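/**
 * node_allowed - check if the current task may use spus on a node
 * @node:	NUMA node to check
 *
 * Returns 1 if @node has any cpus and the cpu affinity mask of the
 * current task allows at least one cpu on that node, 0 otherwise.
 */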
static inline int node_allowed(int node)
{
	cpumask_t mask;

	if (!nr_cpus_node(node))
		return 0;
	mask = node_to_cpumask(node);
	if (!cpus_intersects(mask, current->cpus_allowed))
		return 0;
	return 1;
}

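/**
 * spu_start_tick - arm the time slice tick for a context
 * @ctx:	context to arm the tick for
 *
 * For SCHED_RR contexts, clear the exiting flag and queue the delayed
 * scheduler work so that spu_sched_tick() runs after SPU_TIMESLICE.
 */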
void spu_start_tick(struct spu_context *ctx)
{
	if (ctx->policy == SCHED_RR) {
		/*
		 * Make sure the exiting bit is cleared.
		 */
		clear_bit(SPU_SCHED_EXITING, &ctx->sched_flags);
		mb();
		queue_delayed_work(spu_sched_wq, &ctx->sched_work, SPU_TIMESLICE);
	}
}

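/**
 * spu_stop_tick - cancel the time slice tick for a context
 * @ctx:	context to cancel the tick for
 *
 * For SCHED_RR contexts, set the exiting flag so the tick does not
 * rearm itself and cancel any pending scheduler work.
 */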
void spu_stop_tick(struct spu_context *ctx)
{
	if (ctx->policy == SCHED_RR) {
		/*
		 * While the work can normally rearm itself, setting this
		 * flag makes sure it does not rearm itself anymore.
		 */
		set_bit(SPU_SCHED_EXITING, &ctx->sched_flags);
		mb();
		cancel_delayed_work(&ctx->sched_work);
	}
}

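/**
 * spu_sched_tick - timeslice a SCHED_RR context
 * @work:	delayed work embedded in the spu context
 *
 * If a context of equal or higher priority is waiting on the runqueue,
 * deactivate the running context and wake it up in spu_run so it gets
 * requeued; otherwise rearm the tick.
 */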
void spu_sched_tick(struct work_struct *work)
{
	struct spu_context *ctx =
		container_of(work, struct spu_context, sched_work.work);
	struct spu *spu;
	int preempted = 0;

	/*
	 * If this context is being stopped avoid rescheduling from the
	 * scheduler tick because we would block on the state_mutex.
	 * The caller will yield the spu later on anyway.
	 */
	if (test_bit(SPU_SCHED_EXITING, &ctx->sched_flags))
		return;

	mutex_lock(&ctx->state_mutex);
	spu = ctx->spu;
	if (spu) {
		int best = sched_find_first_bit(spu_prio->bitmap);
		if (best <= ctx->prio) {
			spu_deactivate(ctx);
			preempted = 1;
		}
	}
	mutex_unlock(&ctx->state_mutex);

	if (preempted) {
		/*
		 * We need to break out of the wait loop in spu_run manually
		 * to ensure this context gets put on the runqueue again
		 * ASAP.
		 */
		wake_up(&ctx->stop_wq);
	} else
		spu_start_tick(ctx);
}

/**
 * spu_add_to_active_list - add spu to active list
 * @spu:	spu to add to the active list
 */
static void spu_add_to_active_list(struct spu *spu)
{
	mutex_lock(&spu_prio->active_mutex[spu->node]);
	list_add_tail(&spu->list, &spu_prio->active_list[spu->node]);
	mutex_unlock(&spu_prio->active_mutex[spu->node]);
}

/**
 * spu_remove_from_active_list - remove spu from active list
 * @spu:       spu to remove from the active list
 */
static void spu_remove_from_active_list(struct spu *spu)
{
	int node = spu->node;

	mutex_lock(&spu_prio->active_mutex[node]);
	list_del_init(&spu->list);
	mutex_unlock(&spu_prio->active_mutex[node]);
}

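/*
 * Notifier chain for reporting spu context switches.  spu_switch_notify()
 * passes the object id of the newly scheduled context (or 0 when a context
 * is unbound from the spu) to all registered listeners.
 */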
static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);

static void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
{
	blocking_notifier_call_chain(&spu_switch_notifier,
			    ctx ? ctx->object_id : 0, spu);
}

int spu_switch_event_register(struct notifier_block * n)
{
	return blocking_notifier_chain_register(&spu_switch_notifier, n);
}

int spu_switch_event_unregister(struct notifier_block * n)
{
	return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
}

/**
 * spu_bind_context - bind spu context to physical spu
 * @spu:	physical spu to bind to
 * @ctx:	context to bind
 */
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
		 spu->number, spu->node);
	spu->ctx = ctx;
	spu->flags = 0;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu_associate_mm(spu, ctx->owner);
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	spu->dma_callback = spufs_dma_callback;
	mb();
	spu_unmap_mappings(ctx);
	spu_restore(&ctx->csa, spu);
	spu->timestamp = jiffies;
	spu_cpu_affinity_set(spu, raw_smp_processor_id());
	spu_switch_notify(spu, ctx);
	spu_add_to_active_list(spu);
	ctx->state = SPU_STATE_RUNNABLE;
}

/**
 * spu_unbind_context - unbind spu context from physical spu
 * @spu:	physical spu to unbind from
 * @ctx:	context to unbind
 */
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
		 spu->pid, spu->number, spu->node);

	spu_remove_from_active_list(spu);
	spu_switch_notify(spu, NULL);
	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu->dma_callback = NULL;
	spu_associate_mm(spu, NULL);
	spu->pid = 0;
	ctx->ops = &spu_backing_ops;
	ctx->spu = NULL;
	spu->flags = 0;
	spu->ctx = NULL;
}

/**
 * spu_add_to_rq - add a context to the runqueue
 * @ctx:       context to add
 */
static void spu_add_to_rq(struct spu_context *ctx)
{
	spin_lock(&spu_prio->runq_lock);
	list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
	set_bit(ctx->prio, spu_prio->bitmap);
	mb();
	spin_unlock(&spu_prio->runq_lock);
}

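/*
 * __spu_del_from_rq - runqueue removal helper; the caller must hold
 * spu_prio->runq_lock.  Clears the priority bit when the runqueue for
 * @prio becomes empty.
 */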
static void __spu_del_from_rq(struct spu_context *ctx, int prio)
{
	if (!list_empty(&ctx->rq))
		list_del_init(&ctx->rq);
	if (list_empty(&spu_prio->runq[prio]))
		clear_bit(ctx->prio, spu_prio->bitmap);
}

/**
 * spu_del_from_rq - remove a context from the runqueue
 * @ctx:       context to remove
 */
static void spu_del_from_rq(struct spu_context *ctx)
{
	spin_lock(&spu_prio->runq_lock);
	__spu_del_from_rq(ctx, ctx->prio);
	spin_unlock(&spu_prio->runq_lock);
}

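/**
 * spu_prio_wait - sleep until a spu is handed to this context
 * @ctx:	context waiting for a spu
 *
 * Sleep on the context's stop_wq until woken up or a signal arrives.
 * The state_mutex is dropped while sleeping and reacquired afterwards.
 */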
static void spu_prio_wait(struct spu_context *ctx)
{
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
	if (!signal_pending(current)) {
		mutex_unlock(&ctx->state_mutex);
		schedule();
		mutex_lock(&ctx->state_mutex);
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->stop_wq, &wait);
}

/**
 * spu_reschedule - try to find a runnable context for a spu
 * @spu:       spu available
 *
 * This function is called whenever a spu becomes idle.  It picks the
 * highest priority runnable context and wakes it up so that it can
 * claim the freed spu.
 */
static void spu_reschedule(struct spu *spu)
{
	int best;

	spu_free(spu);

	spin_lock(&spu_prio->runq_lock);
	best = sched_find_first_bit(spu_prio->bitmap);
	if (best < MAX_PRIO) {
		struct list_head *rq = &spu_prio->runq[best];
		struct spu_context *ctx;

		BUG_ON(list_empty(rq));

		ctx = list_entry(rq->next, struct spu_context, rq);
		__spu_del_from_rq(ctx, best);
		wake_up(&ctx->stop_wq);
	}
	spin_unlock(&spu_prio->runq_lock);
}

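/**
 * spu_get_idle - try to allocate an idle spu for a context
 * @ctx:	spu context to schedule
 *
 * Walk the NUMA nodes, starting with the local one, and return the
 * first idle spu found on a node the current task is allowed to run
 * on, or NULL if no idle spu is available.
 */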
static struct spu *spu_get_idle(struct spu_context *ctx)
{
	struct spu *spu = NULL;
	int node = cpu_to_node(raw_smp_processor_id());
	int n;

	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(node))
			continue;
		spu = spu_alloc_node(node);
		if (spu)
			break;
	}
	return spu;
}

/**
 * find_victim - find a lower priority context to preempt
 * @ctx:	candidate context for running
 *
 * Returns the freed physical spu to run the new context on.
 */
static struct spu *find_victim(struct spu_context *ctx)
{
	struct spu_context *victim = NULL;
	struct spu *spu;
	int node, n;

	/*
	 * Look for a possible preemption candidate on the local node first.
	 * If there is no candidate look at the other nodes.  This isn't
	 * exactly fair, but so far the whole spu scheduler tries to keep
	 * a strong node affinity.  We might want to fine-tune this in
	 * the future.
	 */
 restart:
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(node))
			continue;

		mutex_lock(&spu_prio->active_mutex[node]);
		list_for_each_entry(spu, &spu_prio->active_list[node], list) {
			struct spu_context *tmp = spu->ctx;

			if (tmp->rt_priority < ctx->rt_priority &&
			    (!victim || tmp->rt_priority < victim->rt_priority))
				victim = spu->ctx;
		}
		mutex_unlock(&spu_prio->active_mutex[node]);

		if (victim) {
			/*
			 * This nests ctx->state_mutex, but we always lock
			 * higher priority contexts before lower priority
			 * ones, so this is safe until we introduce
			 * priority inheritance schemes.
			 */
			if (!mutex_trylock(&victim->state_mutex)) {
				victim = NULL;
				goto restart;
			}

			spu = victim->spu;
			if (!spu) {
				/*
				 * This race can happen because we've dropped
				 * the active list mutex.  Not a problem, just
				 * restart the search.
				 */
				mutex_unlock(&victim->state_mutex);
				victim = NULL;
				goto restart;
			}
			spu_unbind_context(spu, victim);
			mutex_unlock(&victim->state_mutex);
			/*
			 * We need to break out of the wait loop in spu_run
			 * manually to ensure this context gets put on the
			 * runqueue again ASAP.
			 */
			wake_up(&victim->stop_wq);
			return spu;
		}
	}

	return NULL;
}

/**
 * spu_activate - find a free spu for a context and execute it
 * @ctx:	spu context to schedule
 * @flags:	flags (currently ignored)
 *
 * Tries to find a free spu to run @ctx.  If no free spu is available,
 * add the context to the runqueue so it gets woken up once an spu
 * is available.
 */
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
	if (ctx->spu)
		return 0;

	do {
		struct spu *spu;

		spu = spu_get_idle(ctx);
		/*
		 * If this is a realtime thread we try to get it running by
		 * preempting a lower priority thread.
		 */
		if (!spu && ctx->rt_priority)
			spu = find_victim(ctx);
		if (spu) {
			spu_bind_context(spu, ctx);
			return 0;
		}

		spu_add_to_rq(ctx);
		spu_prio_wait(ctx);
		spu_del_from_rq(ctx);
	} while (!signal_pending(current));

	return -ERESTARTSYS;
}

/**
 * spu_deactivate - unbind a context from its physical spu
 * @ctx:	spu context to unbind
 *
 * Unbind @ctx from the physical spu it is running on and schedule
 * the highest priority context to run on the freed physical spu.
 */
void spu_deactivate(struct spu_context *ctx)
{
	struct spu *spu = ctx->spu;

	if (spu) {
		spu_unbind_context(spu, ctx);
		spu_reschedule(spu);
	}
}

/**
 * spu_yield -  yield a physical spu if others are waiting
 * @ctx:	spu context to yield
 *
 * Check if there is a higher priority context waiting and if yes
 * unbind @ctx from the physical spu and schedule the highest
 * priority context to run on the freed physical spu instead.
 */
void spu_yield(struct spu_context *ctx)
{
	struct spu *spu;

	if (mutex_trylock(&ctx->state_mutex)) {
		if ((spu = ctx->spu) != NULL) {
			int best = sched_find_first_bit(spu_prio->bitmap);
			if (best < MAX_PRIO) {
				pr_debug("%s: yielding SPU %d NODE %d\n",
					 __FUNCTION__, spu->number, spu->node);
				spu_deactivate(ctx);
			}
		}
		mutex_unlock(&ctx->state_mutex);
	}
}

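/*
 * spu_sched_init - set up the scheduler workqueue, the per-priority
 * runqueues and the per-node active lists.
 */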
int __init spu_sched_init(void)
{
	int i;

	spu_sched_wq = create_singlethread_workqueue("spusched");
	if (!spu_sched_wq)
		return 1;

	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
	if (!spu_prio) {
		printk(KERN_WARNING "%s: Unable to allocate priority queue.\n",
		       __FUNCTION__);
		destroy_workqueue(spu_sched_wq);
		return 1;
	}
	for (i = 0; i < MAX_PRIO; i++) {
		INIT_LIST_HEAD(&spu_prio->runq[i]);
		__clear_bit(i, spu_prio->bitmap);
	}
	__set_bit(MAX_PRIO, spu_prio->bitmap);
	for (i = 0; i < MAX_NUMNODES; i++) {
		mutex_init(&spu_prio->active_mutex[i]);
		INIT_LIST_HEAD(&spu_prio->active_list[i]);
	}
	spin_lock_init(&spu_prio->runq_lock);
	return 0;
}

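/*
 * spu_sched_exit - return any spus left on the active lists to the
 * free pool, then free the priority array and destroy the workqueue.
 */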
void __exit spu_sched_exit(void)
{
	struct spu *spu, *tmp;
	int node;

	for (node = 0; node < MAX_NUMNODES; node++) {
		mutex_lock(&spu_prio->active_mutex[node]);
		list_for_each_entry_safe(spu, tmp, &spu_prio->active_list[node],
					 list) {
			list_del_init(&spu->list);
			spu_free(spu);
		}
		mutex_unlock(&spu_prio->active_mutex[node]);
	}
	kfree(spu_prio);
	destroy_workqueue(spu_sched_wq);
}