/*
 * This file contains the routines for handling the MMU on those
 * PowerPC implementations where the MMU is not using the hash
 * table, such as 8xx, 4xx, BookE, etc...
 *
 * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
 *                IBM Corp.
 *
 *  Derived from previous arch/powerpc/mm/mmu_context.c
 *  and arch/powerpc/include/asm/mmu_context.h
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 * TODO:
 *
 *   - The global context lock will not scale very well
 *   - The maps should be dynamically allocated to allow for processors
 *     that support more PID bits at runtime
 *   - Implement flush_tlb_mm() by making the context stale and picking
 *     a new one
 *   - More aggressively clear stale map bits and maybe find some way to
 *     also clear mm->cpu_vm_mask bits when processes are migrated
 */

//#define DEBUG_MAP_CONSISTENCY
//#define DEBUG_CLAMP_LAST_CONTEXT   31
//#define DEBUG_HARDER

/* We don't use DEBUG because it tends to always be compiled in nowadays
 * and this would generate way too much output
 */
#ifdef DEBUG_HARDER
#define pr_hard(args...)	printk(KERN_DEBUG args)
#define pr_hardcont(args...)	printk(KERN_CONT args)
#else
#define pr_hard(args...)	do { } while (0)
#define pr_hardcont(args...)	do { } while (0)
#endif

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/cpu.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
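
/* State of the context "allocator", all protected by context_lock:
 *
 *   first_context/last_context: range of usable context IDs
 *   next_context: next ID to try when allocating a new context
 *   nr_free_contexts: how many IDs are currently unallocated
 *   context_map: bitmap of IDs currently in use
 *   context_mm: reverse map from context ID to the mm using it
 *   stale_map[cpu]: bitmap of IDs that may still have stale TLB
 *                   entries on that CPU and must be flushed there
 *                   before being re-used
 */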

static unsigned int first_context, last_context;
static unsigned int next_context, nr_free_contexts;
static unsigned long *context_map;
static unsigned long *stale_map[NR_CPUS];
static struct mm_struct **context_mm;
static DEFINE_SPINLOCK(context_lock);

#define CTX_MAP_SIZE	\
	(sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1))
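/* e.g. with 256 contexts (last_context = 255) and 32-bit longs, this is
 * 4 * (255 / 32 + 1) = 32 bytes, i.e. one bit per possible context ID.
 */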


/* Steal a context from a task that has one at the moment.
 *
 * This is used when we are running out of available PID numbers
 * on the processors.
 *
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :).  This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 *  -- paulus
 *
 * For context stealing, we use a slightly different approach for
 * SMP and UP. Basically, the UP one is simpler and doesn't use
 * the stale map as we can just flush the local CPU
 *  -- benh
 */
#ifdef CONFIG_SMP
static unsigned int steal_context_smp(unsigned int id)
{
	struct mm_struct *mm;
	unsigned int cpu, max, i;

	max = last_context - first_context;

	/* Attempt to free next_context first and then loop until we manage */
	while (max--) {
		/* Pick up the victim mm */
		mm = context_mm[id];

		/* We have a candidate victim, check if it's active; on SMP
		 * we cannot steal active contexts
		 */
		if (mm->context.active) {
			id++;
			if (id > last_context)
				id = first_context;
			continue;
		}
		pr_hardcont(" | steal %d from 0x%p", id, mm);

		/* Mark this mm as having no context anymore */
		mm->context.id = MMU_NO_CONTEXT;

		/* Mark it stale on all CPUs that used this mm. For threaded
		 * implementations, we set it on all threads on each core
		 * represented in the mask. A future implementation will use
		 * a core map instead but this will do for now.
		 */
		for_each_cpu(cpu, mm_cpumask(mm)) {
			for (i = cpu_first_thread_in_core(cpu);
			     i <= cpu_last_thread_in_core(cpu); i++)
				__set_bit(id, stale_map[i]);
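			/* Advance to the last thread of this core so that
			 * for_each_cpu() moves on past this core's threads.
			 */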
			cpu = i - 1;
		}
		return id;
	}

	/* This will happen if you have more CPUs than available contexts;
	 * all we can do here is wait a bit and try again
	 */
	spin_unlock(&context_lock);
	cpu_relax();
	spin_lock(&context_lock);

	/* This will cause the caller to try again */
	return MMU_NO_CONTEXT;
}
#endif  /* CONFIG_SMP */

/* Note that this will also be called on SMP if all other CPUs are
 * offlined, which means that it may be called for cpu != 0. For
 * this to work, we somewhat assume that CPUs that are onlined
 * come up with a fully clean TLB (or have it cleaned when offlined)
 */
static unsigned int steal_context_up(unsigned int id)
{
	struct mm_struct *mm;
	int cpu = smp_processor_id();

	/* Pick up the victim mm */
	mm = context_mm[id];

	pr_hardcont(" | steal %d from 0x%p", id, mm);

	/* Flush the TLB for that context */
	local_flush_tlb_mm(mm);

	/* Mark this mm as having no context anymore */
	mm->context.id = MMU_NO_CONTEXT;

	/* XXX This clear should ultimately be part of local_flush_tlb_mm */
	__clear_bit(id, stale_map[cpu]);

	return id;
}

#ifdef DEBUG_MAP_CONSISTENCY
static void context_check_map(void)
{
	unsigned int id, nrf, nact;

	nrf = nact = 0;
	for (id = first_context; id <= last_context; id++) {
		int used = test_bit(id, context_map);
		if (!used)
			nrf++;
		if (used != (context_mm[id] != NULL))
			pr_err("MMU: Context %d is %s and MM is %p !\n",
			       id, used ? "used" : "free", context_mm[id]);
		if (context_mm[id] != NULL)
			nact += context_mm[id]->context.active;
	}
	if (nrf != nr_free_contexts) {
		pr_err("MMU: Free context count out of sync ! (%d vs %d)\n",
		       nr_free_contexts, nrf);
		nr_free_contexts = nrf;
	}
	if (nact > num_online_cpus())
		pr_err("MMU: More active contexts than CPUs ! (%d vs %d)\n",
		       nact, num_online_cpus());
	if (first_context > 0 && !test_bit(0, context_map))
		pr_err("MMU: Context 0 has been freed !!!\n");
}
#else
static void context_check_map(void) { }
#endif

void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned int i, id, cpu = smp_processor_id();
	unsigned long *map;

	/* No lockless fast path .. yet */
	spin_lock(&context_lock);

	pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
		cpu, next, next->context.active, next->context.id);

#ifdef CONFIG_SMP
	/* Mark us active and the previous one not anymore */
	next->context.active++;
	if (prev) {
		pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active);
		WARN_ON(prev->context.active < 1);
		prev->context.active--;
	}

 again:
#endif /* CONFIG_SMP */

	/* If we already have a valid assigned context, skip all that */
	id = next->context.id;
	if (likely(id != MMU_NO_CONTEXT)) {
#ifdef DEBUG_MAP_CONSISTENCY
		if (context_mm[id] != next)
			pr_err("MMU: mm 0x%p has id %d but context_mm[%d] says 0x%p\n",
			       next, id, id, context_mm[id]);
#endif
		goto ctxt_ok;
	}

	/* We really don't have a context, let's try to acquire one */
	id = next_context;
	if (id > last_context)
		id = first_context;
	map = context_map;

	/* No more free contexts, let's try to steal one */
	if (nr_free_contexts == 0) {
#ifdef CONFIG_SMP
		if (num_online_cpus() > 1) {
			id = steal_context_smp(id);
			if (id == MMU_NO_CONTEXT)
				goto again;
			goto stolen;
		}
#endif /* CONFIG_SMP */
		id = steal_context_up(id);
		goto stolen;
	}
	nr_free_contexts--;

	/* We know there's at least one free context, try to find it */
	while (__test_and_set_bit(id, map)) {
		id = find_next_zero_bit(map, last_context+1, id);
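		/* No free ID at or above "id": wrap around and keep looking */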
		if (id > last_context)
			id = first_context;
	}
 stolen:
	next_context = id + 1;
	context_mm[id] = next;
	next->context.id = id;
	pr_hardcont(" | new id=%d,nrf=%d", id, nr_free_contexts);

	context_check_map();
 ctxt_ok:

	/* If that context got marked stale on this CPU, then flush the
	 * local TLB for it and unmark it before we use it
	 */
	if (test_bit(id, stale_map[cpu])) {
		pr_hardcont(" | stale flush %d [%d..%d]",
			    id, cpu_first_thread_in_core(cpu),
			    cpu_last_thread_in_core(cpu));

		local_flush_tlb_mm(next);

		/* XXX This clear should ultimately be part of local_flush_tlb_mm */
		for (i = cpu_first_thread_in_core(cpu);
		     i <= cpu_last_thread_in_core(cpu); i++) {
			__clear_bit(id, stale_map[i]);
		}
	}

	/* Flick the MMU and release lock */
	pr_hardcont(" -> %d\n", id);
	set_context(id, next->pgd);
	spin_unlock(&context_lock);
}

/*
 * Set up the context for a new address space.
 */
int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
	pr_hard("initing context for mm @%p\n", mm);

	mm->context.id = MMU_NO_CONTEXT;
	mm->context.active = 0;
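
	/* The actual context ID is allocated lazily, on the first call to
	 * switch_mmu_context() for this mm.
	 */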

	return 0;
}

/*
 * We're finished using the context for an address space.
 */
void destroy_context(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int id;

	if (mm->context.id == MMU_NO_CONTEXT)
		return;

	WARN_ON(mm->context.active != 0);

	spin_lock_irqsave(&context_lock, flags);
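	/* Re-check the ID under the lock: a concurrent context steal may
	 * have released it already.
	 */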
	id = mm->context.id;
	if (id != MMU_NO_CONTEXT) {
		__clear_bit(id, context_map);
		mm->context.id = MMU_NO_CONTEXT;
#ifdef DEBUG_MAP_CONSISTENCY
		mm->context.active = 0;
#endif
		context_mm[id] = NULL;
		nr_free_contexts++;
	}
	spin_unlock_irqrestore(&context_lock, flags);
}

#ifdef CONFIG_SMP

static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
					    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
#ifdef CONFIG_HOTPLUG_CPU
	struct task_struct *p;
#endif
	/* We don't touch the CPU 0 map, it's allocated at boot and kept
	 * around forever
	 */
	if (cpu == 0)
		return NOTIFY_OK;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
		stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
		kfree(stale_map[cpu]);
		stale_map[cpu] = NULL;

		/* We also clear the cpu_vm_mask bits of CPUs going away */
		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (p->mm)
				cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
		}
		read_unlock(&tasklist_lock);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
	.notifier_call	= mmu_context_cpu_notify,
};

#endif /* CONFIG_SMP */

/*
 * Initialize the context management stuff.
 */
void __init mmu_context_init(void)
{
	/* Mark init_mm as being active on all possible CPUs since
	 * we'll get called with prev == init_mm the first time
	 * we schedule on a given CPU
	 */
	init_mm.context.active = NR_CPUS;

	/*
	 *   The MPC8xx has only 16 contexts.  We rotate through them on each
	 * task switch.  A better way would be to keep track of tasks that
	 * own contexts, and implement an LRU usage.  That way very active
	 * tasks don't always have to pay the TLB reload overhead.  The
	 * kernel pages are mapped shared, so the kernel can run on behalf
	 * of any task that makes a kernel entry.  Shared does not mean they
	 * are not protected, just that the ASID comparison is not performed.
	 *      -- Dan
	 *
	 * The IBM4xx has 256 contexts, so we can just rotate through these
	 * as a way of "switching" contexts.  If the TID of the TLB is zero,
	 * the PID/TID comparison is disabled, so we can use a TID of zero
	 * to represent all kernel pages as shared among all contexts.
	 * 	-- Dan
	 */
	if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
		first_context = 0;
		last_context = 15;
	} else {
		first_context = 1;
		last_context = 255;
	}

#ifdef DEBUG_CLAMP_LAST_CONTEXT
	last_context = DEBUG_CLAMP_LAST_CONTEXT;
#endif
	/*
	 * Allocate the maps used by context management
	 */
	context_map = alloc_bootmem(CTX_MAP_SIZE);
	context_mm = alloc_bootmem(sizeof(void *) * (last_context + 1));
	stale_map[0] = alloc_bootmem(CTX_MAP_SIZE);
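	/* Stale maps for the other CPUs are allocated by the CPU hotplug
	 * notifier when those CPUs come online (SMP only); the CPU 0 map
	 * is kept around forever.
	 */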

#ifdef CONFIG_SMP
	register_cpu_notifier(&mmu_context_cpu_nb);
#endif

	printk(KERN_INFO
	       "MMU: Allocated %zu bytes of context maps for %d contexts\n",
	       2 * CTX_MAP_SIZE + (sizeof(void *) * (last_context + 1)),
	       last_context - first_context + 1);

	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes first_context < 32.
	 */
	context_map[0] = (1 << first_context) - 1;
	next_context = first_context;
	nr_free_contexts = last_context - first_context + 1;
}