/*
 *  linux/arch/arm/mm/mmu.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/fs.h>

#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/cachetype.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/smp_plat.h>
#include <asm/tlb.h>
#include <asm/highmem.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

/*
 * The pmd table for the upper-most set of pages.
 */
pmd_t *top_pmd;

#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);

struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	unsigned int	pmd;
	unsigned int	pte;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.cr_mask	= CR_W|CR_C,
		.pmd		= PMD_SECT_UNCACHED,
		.pte		= L_PTE_MT_UNCACHED,
	}, {
		.policy		= "buffered",
		.cr_mask	= CR_C,
		.pmd		= PMD_SECT_BUFFERED,
		.pte		= L_PTE_MT_BUFFERABLE,
	}, {
		.policy		= "writethrough",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WT,
		.pte		= L_PTE_MT_WRITETHROUGH,
	}, {
		.policy		= "writeback",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WB,
		.pte		= L_PTE_MT_WRITEBACK,
	}, {
		.policy		= "writealloc",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WBWA,
		.pte		= L_PTE_MT_WRITEALLOC,
	}
};

/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on and the cache off).
 */
static int __init early_cachepolicy(char *p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	/*
	 * This restriction is partly to do with the way we boot; it is
	 * unpredictable to have memory mapped using two different sets of
	 * memory attributes (shared, type, and cache attribs).  We cannot
	 * change these attributes once the initial assembly has setup the
	 * page tables.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
		cachepolicy = CPOLICY_WRITEBACK;
	}
	flush_cache_all();
	set_cr(cr_alignment);
	return 0;
}
early_param("cachepolicy", early_cachepolicy);

static int __init early_nocache(char *__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nocache", early_nocache);

static int __init early_nowrite(char *__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nowb", early_nowrite);

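/*
 * "ecc=on|off" early parameter: when on, PMD_PROTECTION is OR'd into
 * the first-level descriptors built later in build_mem_type_table().
 */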
static int __init early_ecc(char *p)
{
	if (memcmp(p, "on", 2) == 0)
		ecc_mask = PMD_PROTECTION;
	else if (memcmp(p, "off", 3) == 0)
		ecc_mask = 0;
	return 0;
}
early_param("ecc", early_ecc);

static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}
__setup("noalign", noalign_setup);

#ifndef CONFIG_SMP
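/*
 * Adjust the bits in 'mask' of the control register to the values in
 * 'set', keeping the cached cr_alignment/cr_no_alignment copies in
 * sync.  Alignment fault control (CR_A) is never touched here.
 */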
void adjust_cr(unsigned long mask, unsigned long set)
{
	unsigned long flags;

	mask &= ~CR_A;

	set &= mask;

	local_irq_save(flags);

	cr_no_alignment = (cr_no_alignment & ~mask) | set;
	cr_alignment = (cr_alignment & ~mask) | set;

	set_cr((get_cr() & ~mask) | set);

	local_irq_restore(flags);
}
#endif

#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE
#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE

static struct mem_type mem_types[] = {
	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
				  L_PTE_SHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_WC] = {	/* ioremap_wc */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_UNCACHED] = {
		.prot_pte	= PROT_PTE_DEVICE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MINICLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_LOW_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_USER | L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_NONCACHED] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_DTCM] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG |
		                  L_PTE_DIRTY | L_PTE_WRITE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_MEMORY_ITCM] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_USER | L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_IO,
	},
};

const struct mem_type *get_mem_type(unsigned int type)
{
	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}
EXPORT_SYMBOL(get_mem_type);

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	unsigned int user_pgprot, kern_pgprot, vecs_pgprot;
	int cpu_arch = cpu_architecture();
	int i;

	if (cpu_arch < CPU_ARCH_ARMv6) {
#if defined(CONFIG_CPU_DCACHE_DISABLE)
		if (cachepolicy > CPOLICY_BUFFERED)
			cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
		if (cachepolicy > CPOLICY_WRITETHROUGH)
			cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	}
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}
#ifdef CONFIG_SMP
	cachepolicy = CPOLICY_WRITEALLOC;
#endif

	/*
	 * Strip out features not present on earlier architectures.
	 * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
	 * without extended page tables don't have the 'Shared' bit.
	 */
	if (cpu_arch < CPU_ARCH_ARMv5)
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
	if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_S;

	/*
	 * ARMv5 and lower, bit 4 must be set for page tables (was: cache
	 * "update-able on write" bit on ARM610).  However, Xscale and
	 * Xscale3 require this bit to be cleared.
	 */
	if (cpu_is_xscale() || cpu_is_xsc3()) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			mem_types[i].prot_sect &= ~PMD_BIT4;
			mem_types[i].prot_l1 &= ~PMD_BIT4;
		}
	} else if (cpu_arch < CPU_ARCH_ARMv6) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
		}
	}

	/*
	 * Mark the device areas according to the CPU/architecture.
	 */
	if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
		if (!cpu_is_xsc3()) {
			/*
			 * Mark device regions on ARMv6+ as execute-never
			 * to prevent speculative instruction fetches.
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
		}
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/*
			 * For ARMv7 with TEX remapping,
			 * - shared device is SXCB=1100
			 * - nonshared device is SXCB=0100
			 * - write combine device mem is SXCB=0001
			 * (Uncached Normal memory)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
		} else if (cpu_is_xsc3()) {
			/*
			 * For Xscale3,
			 * - shared device is TEXCB=00101
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Inner/Outer Uncacheable in xsc3 parlance)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		} else {
			/*
			 * For ARMv6 and ARMv7 without TEX remapping,
			 * - shared device is TEXCB=00001
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Uncached Normal in ARMv6 parlance).
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		}
	} else {
		/*
		 * On others, write combining is "Uncached/Buffered"
		 */
		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
	}

	/*
	 * Now deal with the memory-type mappings
	 */
	cp = &cache_policies[cachepolicy];
	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;

#ifndef CONFIG_SMP
	/*
	 * Only use write-through for non-SMP systems
	 */
	if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
		vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
#endif

	/*
	 * Enable CPU-specific coherency if supported.
	 * (Only available on XSC3 at the moment.)
	 */
	if (arch_is_coherent() && cpu_is_xsc3())
		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;

	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;

#ifdef CONFIG_SMP
		/*
		 * Mark memory with the "shared" attribute for SMP systems
		 */
		user_pgprot |= L_PTE_SHARED;
		kern_pgprot |= L_PTE_SHARED;
		vecs_pgprot |= L_PTE_SHARED;
		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
		mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
		mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
		mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
#endif
	}

	/*
	 * Non-cacheable Normal - intended for memory areas that must
	 * not cause dirty cache line writebacks when used
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6) {
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/* Non-cacheable Normal is XCB = 001 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_BUFFERED;
		} else {
			/* For both ARMv6 and non-TEX-remapping ARMv7 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_TEX(1);
		}
	} else {
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
	}

	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		protection_map[i] = __pgprot(v | user_pgprot);
	}

	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;

	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | L_PTE_WRITE | kern_pgprot);

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);

	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		struct mem_type *t = &mem_types[i];
		if (t->prot_l1)
			t->prot_l1 |= PMD_DOMAIN(t->domain);
		if (t->prot_sect)
			t->prot_sect |= PMD_DOMAIN(t->domain);
	}
}

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
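/*
 * Select the page protection for a mapping of physical page 'pfn':
 * non-RAM is mapped uncached, O_SYNC mappings are write-combined,
 * and everything else keeps the protection passed in.
 */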
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
#endif

#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)

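/*
 * Allocate 'sz' zeroed, naturally aligned bytes from memblock during
 * early boot and return the virtual address.
 */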
static void __init *early_alloc(unsigned long sz)
{
	void *ptr = __va(memblock_alloc(sz, sz));
	memset(ptr, 0, sz);
	return ptr;
}

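/*
 * Return the pte entry for 'addr', allocating a new page table and
 * populating the pmd with it if none is present yet.
 */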
static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = early_alloc(2 * PTRS_PER_PTE * sizeof(pte_t));
		__pmd_populate(pmd, __pa(pte) | prot);
	}
	BUG_ON(pmd_bad(*pmd));
	return pte_offset_kernel(pmd, addr);
}

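/*
 * Create pte-level mappings for [addr, end) starting at page frame
 * 'pfn' with the protections of the given memory type.
 */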
static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  const struct mem_type *type)
{
	pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
	do {
		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

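/*
 * Map [addr, end) to 'phys' with section entries when all three are
 * section-aligned, otherwise fall back to pte-level mappings.
 */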
static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
				      unsigned long end, unsigned long phys,
				      const struct mem_type *type)
{
	pmd_t *pmd = pmd_offset(pgd, addr);

	/*
	 * Try a section mapping - end, addr and phys must all be aligned
	 * to a section boundary.  Note that PMDs refer to the individual
	 * L1 entries, whereas PGDs refer to a group of L1 entries making
	 * up one logical pointer to an L2 table.
	 */
	if (((addr | end | phys) & ~SECTION_MASK) == 0) {
		pmd_t *p = pmd;

		if (addr & SECTION_SIZE)
			pmd++;

		do {
			*pmd = __pmd(phys | type->prot_sect);
			phys += SECTION_SIZE;
		} while (pmd++, addr += SECTION_SIZE, addr != end);

		flush_pmd_entry(p);
	} else {
		/*
		 * No need to loop; pte's aren't interested in the
		 * individual L1 entries.
		 */
		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
	}
}

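/*
 * Map a region whose physical address needs more than 32 bits using
 * supersection entries.  Only ARMv6+ and XScale3 CPUs support this,
 * and only for domain 0.
 */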
static void __init create_36bit_mapping(struct map_desc *md,
					const struct mem_type *type)
{
	unsigned long phys, addr, length, end;
	pgd_t *pgd;

	addr = md->virtual;
	phys = (unsigned long)__pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length);

	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
		printk(KERN_ERR "MM: CPU does not support supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       __pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to ensure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if (type->domain) {
		printk(KERN_ERR "MM: invalid domain in supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       __pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
		printk(KERN_ERR "MM: cannot create mapping for "
		       "0x%08llx at 0x%08lx invalid alignment\n",
		       __pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/*
	 * Shift bits [35:32] of address into bits [23:20] of PMD
	 * (See ARMv6 spec).
	 */
	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		pmd_t *pmd = pmd_offset(pgd, addr);
		int i;

		for (i = 0; i < 16; i++)
			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);

		addr += SUPERSECTION_SIZE;
		phys += SUPERSECTION_SIZE;
		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
	} while (addr != end);
}

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
static void __init create_mapping(struct map_desc *md)
{
	unsigned long phys, addr, length, end;
	const struct mem_type *type;
	pgd_t *pgd;

	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for "
		       "0x%08llx at 0x%08lx in user region\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
		printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx "
		       "overlaps vmalloc space\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
	}

	type = &mem_types[md->type];

	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		create_36bit_mapping(md, type);
		return;
	}

	addr = md->virtual & PAGE_MASK;
	phys = (unsigned long)__pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
		printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       __pfn_to_phys(md->pfn), addr);
		return;
	}

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		unsigned long next = pgd_addr_end(addr, end);

		alloc_init_section(pgd, addr, next, phys, type);

		phys += next - addr;
		addr = next;
	} while (pgd++, addr != end);
}

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		create_mapping(io_desc + i);
}

static void * __initdata vmalloc_min = (void *)(VMALLOC_END - SZ_128M);

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the vmalloc
 * area - the default is 128m.
 */
static int __init early_vmalloc(char *arg)
{
	unsigned long vmalloc_reserve = memparse(arg, NULL);

	if (vmalloc_reserve < SZ_16M) {
		vmalloc_reserve = SZ_16M;
		printk(KERN_WARNING
			"vmalloc area too small, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
		vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
		printk(KERN_WARNING
			"vmalloc area is too big, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
	return 0;
}
early_param("vmalloc", early_vmalloc);

phys_addr_t lowmem_end_addr;

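/*
 * Trim and split the memory banks so that lowmem never extends into
 * the vmalloc area: banks are truncated or split at vmalloc_min, and
 * anything above it is marked highmem (or dropped when highmem is not
 * configured).
 */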
static void __init sanity_check_meminfo(void)
{
	int i, j, highmem = 0;

	lowmem_end_addr = __pa(vmalloc_min - 1) + 1;

	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
		struct membank *bank = &meminfo.bank[j];
		*bank = meminfo.bank[i];

#ifdef CONFIG_HIGHMEM
		if (__va(bank->start) > vmalloc_min ||
		    __va(bank->start) < (void *)PAGE_OFFSET)
			highmem = 1;

		bank->highmem = highmem;

		/*
		 * Split those memory banks which are partially overlapping
		 * the vmalloc area, greatly simplifying things later.
		 */
		if (__va(bank->start) < vmalloc_min &&
		    bank->size > vmalloc_min - __va(bank->start)) {
			if (meminfo.nr_banks >= NR_BANKS) {
				printk(KERN_CRIT "NR_BANKS too low, "
						 "ignoring high memory\n");
			} else {
				memmove(bank + 1, bank,
					(meminfo.nr_banks - i) * sizeof(*bank));
				meminfo.nr_banks++;
				i++;
				bank[1].size -= vmalloc_min - __va(bank->start);
				bank[1].start = __pa(vmalloc_min - 1) + 1;
				bank[1].highmem = highmem = 1;
				j++;
			}
			bank->size = vmalloc_min - __va(bank->start);
		}
#else
		bank->highmem = highmem;

		/*
		 * Check whether this memory bank would entirely overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start) >= vmalloc_min ||
		    __va(bank->start) < (void *)PAGE_OFFSET) {
			printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
			       "(vmalloc region overlap).\n",
			       bank->start, bank->start + bank->size - 1);
			continue;
		}

		/*
		 * Check whether this memory bank would partially overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start + bank->size) > vmalloc_min ||
		    __va(bank->start + bank->size) < __va(bank->start)) {
			unsigned long newsize = vmalloc_min - __va(bank->start);
			printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx "
			       "to -%.8lx (vmalloc region overlap).\n",
			       bank->start, bank->start + bank->size - 1,
			       bank->start + newsize - 1);
			bank->size = newsize;
		}
#endif
		j++;
	}
#ifdef CONFIG_HIGHMEM
	if (highmem) {
		const char *reason = NULL;

		if (cache_is_vipt_aliasing()) {
			/*
			 * Interactions between kmap and other mappings
			 * make highmem support with aliasing VIPT caches
			 * rather difficult.
			 */
			reason = "with VIPT aliasing cache";
#ifdef CONFIG_SMP
		} else if (tlb_ops_need_broadcast()) {
			/*
			 * kmap_high needs to occasionally flush TLB entries,
			 * however, if the TLB entries need to be broadcast
			 * we may deadlock:
			 *  kmap_high(irqs off)->flush_all_zero_pkmaps->
			 *  flush_tlb_kernel_range->smp_call_function_many
			 *   (must not be called with irqs off)
			 */
			reason = "without hardware TLB ops broadcasting";
#endif
		}
		if (reason) {
			printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
				reason);
			while (j > 0 && meminfo.bank[j - 1].highmem)
				j--;
		}
	}
#endif
	meminfo.nr_banks = j;
}

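/*
 * Clear out the boot-time mappings we are about to rebuild: everything
 * below the kernel image and everything above the first memory bank up
 * to the end of the vmalloc region.
 */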
static inline void prepare_page_table(void)
{
	unsigned long addr;

	/*
	 * Clear out all the mappings below the kernel image.
	 */
	for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

#ifdef CONFIG_XIP_KERNEL
	/* The XIP kernel is mapped in the module area -- skip over it */
	addr = ((unsigned long)_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
#endif
	for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Clear out all the kernel space mappings, except for the first
	 * memory bank, up to the end of the vmalloc region.
	 */
	for (addr = __phys_to_virt(bank_phys_end(&meminfo.bank[0]));
	     addr < VMALLOC_END; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));
}

/*
 * Reserve the special regions of memory
 */
void __init arm_mm_memblock_reserve(void)
{
	/*
	 * Reserve the page tables.  These are already in use,
	 * and can only be in node 0.
	 */
	memblock_reserve(__pa(swapper_pg_dir), PTRS_PER_PGD * sizeof(pgd_t));

#ifdef CONFIG_SA1111
	/*
	 * Because of the SA1111 DMA bug, we want to preserve our
	 * precious DMA-able memory...
	 */
	memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
#endif
}

/*
 * Set up the device mappings.  Since we clear out the page tables for all
 * mappings above VMALLOC_END, we will remove any debug device mappings.
 * This means you have to be careful how you debug this function, or any
 * called function.  This means you can't use any function or debugging
 * method which may touch any device, otherwise the kernel _will_ crash.
 */
static void __init devicemaps_init(struct machine_desc *mdesc)
{
	struct map_desc map;
	unsigned long addr;
	void *vectors;

	/*
	 * Allocate the vector page early.
	 */
	vectors = early_alloc(PAGE_SIZE);

	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Map the kernel if it is XIP.
	 * It is always first in the modulearea.
	 */
#ifdef CONFIG_XIP_KERNEL
	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
	map.virtual = MODULES_VADDR;
	map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
	map.type = MT_ROM;
	create_mapping(&map);
#endif

	/*
	 * Map the cache flushing regions.
	 */
#ifdef FLUSH_BASE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
	map.virtual = FLUSH_BASE;
	map.length = SZ_1M;
	map.type = MT_CACHECLEAN;
	create_mapping(&map);
#endif
#ifdef FLUSH_BASE_MINICACHE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
	map.virtual = FLUSH_BASE_MINICACHE;
	map.length = SZ_1M;
	map.type = MT_MINICLEAN;
	create_mapping(&map);
#endif

	/*
	 * Create a mapping for the machine vectors at the high-vectors
	 * location (0xffff0000).  If we aren't using high-vectors, also
	 * create a mapping at the low-vectors virtual address.
	 */
	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
	map.virtual = 0xffff0000;
	map.length = PAGE_SIZE;
	map.type = MT_HIGH_VECTORS;
	create_mapping(&map);

	if (!vectors_high()) {
		map.virtual = 0;
		map.type = MT_LOW_VECTORS;
		create_mapping(&map);
	}

	/*
	 * Ask the machine support to map in the statically mapped devices.
	 */
	if (mdesc->map_io)
		mdesc->map_io();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state wrt the writebuffer.  This also ensures that
	 * any write-allocated cache lines in the vector page are written
	 * back.  After this point, we can start to touch devices again.
	 */
	local_flush_tlb_all();
	flush_cache_all();
}

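/* Pre-allocate the page table covering the persistent kmap area. */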
static void __init kmap_init(void)
{
#ifdef CONFIG_HIGHMEM
	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
		PKMAP_BASE, _PAGE_KERNEL_TABLE);
#endif
}

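/* Create an MT_MEMORY mapping covering a single lowmem bank. */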
static inline void map_memory_bank(struct membank *bank)
{
	struct map_desc map;

	map.pfn = bank_pfn_start(bank);
	map.virtual = __phys_to_virt(bank_phys_start(bank));
	map.length = bank_phys_size(bank);
	map.type = MT_MEMORY;

	create_mapping(&map);
}

static void __init map_lowmem(void)
{
	struct meminfo *mi = &meminfo;
	int i;

	/* Map all the lowmem memory banks. */
	for (i = 0; i < mi->nr_banks; i++) {
		struct membank *bank = &mi->bank[i];

		if (!bank->highmem)
			map_memory_bank(bank);
	}
}

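/* sort() comparator: order memory banks by ascending start pfn. */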
static int __init meminfo_cmp(const void *_a, const void *_b)
{
	const struct membank *a = _a, *b = _b;
	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(struct machine_desc *mdesc)
{
	void *zero_page;

	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);

	build_mem_type_table();
	sanity_check_meminfo();
	prepare_page_table();
	map_lowmem();
	devicemaps_init(mdesc);
	kmap_init();

	top_pmd = pmd_off_k(0xffff0000);

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);
	__flush_dcache_page(NULL, empty_zero_page);
}

/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the mmu off
 */
void setup_mm_for_reboot(char mode)
{
	unsigned long base_pmdval;
	pgd_t *pgd;
	int i;

	/*
	 * We need to access the user-mode page tables here. For kernel threads
	 * we don't have any user-mode mappings so we use the context that we
	 * "borrowed".
	 */
	pgd = current->active_mm->pgd;

	base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
		base_pmdval |= PMD_BIT4;

	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
		unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
		pmd_t *pmd;

		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
		pmd[0] = __pmd(pmdval);
		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
		flush_pmd_entry(pmd);
	}

	local_flush_tlb_all();
}