/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>

#include <asm/cp15.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

#ifdef CONFIG_CPU_CP15_MMU
/*
 * Clear the given bits in the cached CP15 control register value
 * (cr_alignment) and return the updated value.
 */
unsigned long __init __clear_cr(unsigned long mask)
{
	cr_alignment = cr_alignment & ~mask;
	return cr_alignment;
}
#endif

49
static phys_addr_t phys_initrd_start __initdata = 0;
50 51
static unsigned long phys_initrd_size __initdata = 0;

52
static int __init early_initrd(char *p)
53
{
54 55
	phys_addr_t start;
	unsigned long size;
56
	char *endp;
57

58 59 60
	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);
61 62 63 64

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
65
	return 0;
66
}
67
early_param("initrd", early_initrd);
68 69 70

static int __init parse_tag_initrd(const struct tag *tag)
{
R
Russell King 已提交
71
	pr_warn("ATAG_INITRD is deprecated; "
72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

/* Handle ATAG_INITRD2, which carries a physical initrd address directly. */
static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
L
Linus Torvalds 已提交
88

89
static void __init find_limits(unsigned long *min, unsigned long *max_low,
90
			       unsigned long *max_high)
R
Russell King 已提交
91
{
L
Laura Abbott 已提交
92 93 94
	*max_low = PFN_DOWN(memblock_get_current_limit());
	*min = PFN_UP(memblock_start_of_DRAM());
	*max_high = PFN_DOWN(memblock_end_of_DRAM());
R
Russell King 已提交
95 96
}

#ifdef CONFIG_ZONE_DMA

phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA.  The default here places no restriction on DMA
 * allocations.  This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;

/*
 * Carve the first dma_size pages out of zone 0 into ZONE_DMA,
 * leaving the remainder in ZONE_NORMAL.  All holes are attributed
 * to ZONE_NORMAL.  No-op when zone 0 fits entirely in the DMA zone.
 */
static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
	unsigned long dma_size)
{
	if (size[0] <= dma_size)
		return;

	size[ZONE_NORMAL] = size[0] - dma_size;
	size[ZONE_DMA] = dma_size;
	hole[ZONE_NORMAL] = hole[0];
	hole[ZONE_DMA] = 0;
}
#endif

124
void __init setup_dma_zone(const struct machine_desc *mdesc)
125 126 127 128
{
#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		arm_dma_zone_size = mdesc->dma_zone_size;
129
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
130 131
	} else
		arm_dma_limit = 0xffffffff;
132
	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
133 134 135
#endif
}

136
static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
137
	unsigned long max_high)
138 139
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
140
	struct memblock_region *reg;
141

R
Russell King 已提交
142
	/*
R
Russell King 已提交
143
	 * initialise the zones.
R
Russell King 已提交
144 145 146 147
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
R
Russell King 已提交
148 149 150
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
R
Russell King 已提交
151
	 */
R
Russell King 已提交
152 153 154 155
	zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif
R
Russell King 已提交
156 157

	/*
R
Russell King 已提交
158 159
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
R
Russell King 已提交
160
	 */
R
Russell King 已提交
161
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
162 163 164 165 166 167 168 169
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
R
Russell King 已提交
170
#ifdef CONFIG_HIGHMEM
171 172 173 174
		if (end > max_low) {
			unsigned long high_start = max(start, max_low);
			zhole_size[ZONE_HIGHMEM] -= end - high_start;
		}
R
Russell King 已提交
175 176
#endif
	}
R
Russell King 已提交
177

178
#ifdef CONFIG_ZONE_DMA
R
Russell King 已提交
179 180 181 182
	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
183
	if (arm_dma_zone_size)
184 185
		arm_adjust_dma_zone(zone_size, zhole_size,
			arm_dma_zone_size >> PAGE_SHIFT);
186
#endif
R
Russell King 已提交
187

R
Russell King 已提交
188
	free_area_init_node(0, zone_size, min, zhole_size);
L
Linus Torvalds 已提交
189 190
}

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
/* A PFN is valid iff its physical address lies in a memblock memory region. */
int pfn_valid(unsigned long pfn)
{
	return memblock_is_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif
198

199
#ifndef CONFIG_SPARSEMEM
200
static void __init arm_memory_present(void)
201 202 203
{
}
#else
204
static void __init arm_memory_present(void)
205
{
206 207
	struct memblock_region *reg;

208
	for_each_memblock(memory, reg)
209 210
		memory_present(0, memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
211
}
212 213
#endif

214 215
static bool arm_memblock_steal_permitted = true;

216
phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
217 218 219 220 221
{
	phys_addr_t phys;

	BUG_ON(!arm_memblock_steal_permitted);

222
	phys = memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
223 224 225 226 227 228
	memblock_free(phys, size);
	memblock_remove(phys, size);

	return phys;
}

L
Laura Abbott 已提交
229
void __init arm_memblock_init(const struct machine_desc *mdesc)
R
Russell King 已提交
230 231 232
{
	/* Register the kernel text, kernel data and initrd with memblock. */
#ifdef CONFIG_XIP_KERNEL
233
	memblock_reserve(__pa(_sdata), _end - _sdata);
R
Russell King 已提交
234 235 236 237
#else
	memblock_reserve(__pa(_stext), _end - _stext);
#endif
#ifdef CONFIG_BLK_DEV_INITRD
238
	/* FDT scan will populate initrd_start */
239
	if (initrd_start && !phys_initrd_size) {
240 241 242
		phys_initrd_start = __virt_to_phys(initrd_start);
		phys_initrd_size = initrd_end - initrd_start;
	}
243
	initrd_start = initrd_end = 0;
244 245
	if (phys_initrd_size &&
	    !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
246 247
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
		       (u64)phys_initrd_start, phys_initrd_size);
248 249
		phys_initrd_start = phys_initrd_size = 0;
	}
250 251
	if (phys_initrd_size &&
	    memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
252 253
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
		       (u64)phys_initrd_start, phys_initrd_size);
254 255
		phys_initrd_start = phys_initrd_size = 0;
	}
R
Russell King 已提交
256 257 258 259 260 261 262 263 264 265 266
	if (phys_initrd_size) {
		memblock_reserve(phys_initrd_start, phys_initrd_size);

		/* Now convert initrd to virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}
#endif

	arm_mm_memblock_reserve();

267 268 269 270
	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

271
	early_init_fdt_reserve_self();
272 273
	early_init_fdt_scan_reserved_mem();

274
	/* reserve memory for DMA contiguous allocations */
275
	dma_contiguous_reserve(arm_dma_limit);
276

277
	arm_memblock_steal_permitted = false;
R
Russell King 已提交
278 279 280
	memblock_dump_all();
}

281
void __init bootmem_init(void)
L
Linus Torvalds 已提交
282
{
R
Russell King 已提交
283
	unsigned long min, max_low, max_high;
L
Linus Torvalds 已提交
284

285
	memblock_allow_resize();
R
Russell King 已提交
286 287
	max_low = max_high = 0;

288
	find_limits(&min, &max_low, &max_high);
R
Russell King 已提交
289

V
Vladimir Murzin 已提交
290 291 292
	early_memtest((phys_addr_t)min << PAGE_SHIFT,
		      (phys_addr_t)max_low << PAGE_SHIFT);

R
Russell King 已提交
293 294 295 296
	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
297
	arm_memory_present();
L
Linus Torvalds 已提交
298

299 300 301 302 303 304
	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
R
Russell King 已提交
305
	 * Now free the memory - free_area_init_node needs
306 307 308
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
309
	zone_sizes_init(min, max_low, max_high);
310

R
Russell King 已提交
311 312 313 314 315
	/*
	 * This doesn't seem to be used by the Linux memory manager any
	 * more, but is used by ll_rw_block.  If we can get rid of it, we
	 * also get rid of some of the stuff above as well.
	 */
316 317 318
	min_low_pfn = min;
	max_low_pfn = max_low;
	max_pfn = max_high;
R
Russell King 已提交
319
}
L
Linus Torvalds 已提交
320

321 322 323 324 325 326 327
/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = (u32 *)s;
328
	for (; count != 0; count -= 4)
329 330 331
		*p++ = 0xe7fddef0;
}

332
static inline void
R
Russell King 已提交
333
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
334 335
{
	struct page *start_pg, *end_pg;
336
	phys_addr_t pg, pgend;
337 338 339 340

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
341
	start_pg = pfn_to_page(start_pfn - 1) + 1;
342
	end_pg = pfn_to_page(end_pfn - 1) + 1;
343 344 345 346 347

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
348 349
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;
350 351 352 353 354 355

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
356
		memblock_free_early(pg, pgend - pg);
357 358 359 360 361
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
L
Laura Abbott 已提交
362
static void __init free_unused_memmap(void)
363
{
L
Laura Abbott 已提交
364 365
	unsigned long start, prev_end = 0;
	struct memblock_region *reg;
366 367

	/*
368 369
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
370
	 */
L
Laura Abbott 已提交
371 372
	for_each_memblock(memory, reg) {
		start = memblock_region_memory_base_pfn(reg);
373

374 375 376 377 378
#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
L
Laura Abbott 已提交
379 380
		start = min(start,
				 ALIGN(prev_end, PAGES_PER_SECTION));
381 382 383 384 385 386
#else
		/*
		 * Align down here since the VM subsystem insists that the
		 * memmap entries are valid from the bank start aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
L
Laura Abbott 已提交
387
		start = round_down(start, MAX_ORDER_NR_PAGES);
388
#endif
389 390 391 392
		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
L
Laura Abbott 已提交
393 394
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);
395

396 397 398 399 400
		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
L
Laura Abbott 已提交
401 402
		prev_end = ALIGN(memblock_region_memory_end_pfn(reg),
				 MAX_ORDER_NR_PAGES);
403
	}
404 405

#ifdef CONFIG_SPARSEMEM
L
Laura Abbott 已提交
406 407 408
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end,
			    ALIGN(prev_end, PAGES_PER_SECTION));
409
#endif
410 411
}

#ifdef CONFIG_HIGHMEM
/* Release the highmem pages in PFN range [pfn, end) to the page allocator. */
static inline void free_area_high(unsigned long pfn, unsigned long end)
{
	for (; pfn < end; pfn++)
		free_highmem_page(pfn_to_page(pfn));
}
#endif

420 421 422
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
423
	unsigned long max_low = max_low_pfn;
424
	struct memblock_region *mem, *res;
425 426

	/* set highmem page free */
427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454
	for_each_memblock(memory, mem) {
		unsigned long start = memblock_region_memory_base_pfn(mem);
		unsigned long end = memblock_region_memory_end_pfn(mem);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		/* Find and exclude any reserved regions */
		for_each_memblock(reserved, res) {
			unsigned long res_start, res_end;

			res_start = memblock_region_reserved_base_pfn(res);
			res_end = memblock_region_reserved_end_pfn(res);

			if (res_end < start)
				continue;
			if (res_start < start)
				res_start = start;
			if (res_start > end)
				res_start = end;
			if (res_end > end)
				res_end = end;
			if (res_start != start)
455
				free_area_high(start, res_start);
456 457 458 459 460 461 462
			start = res_end;
			if (start == end)
				break;
		}

		/* And now free anything which remains */
		if (start < end)
463
			free_area_high(start, end);
464 465 466 467
	}
#endif
}

L
Linus Torvalds 已提交
468 469 470 471 472 473 474
/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
475 476 477 478 479
#ifdef CONFIG_HAVE_TCM
	/* These pointers are filled in on TCM detection */
	extern u32 dtcm_end;
	extern u32 itcm_end;
#endif
L
Linus Torvalds 已提交
480

481
	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
L
Linus Torvalds 已提交
482 483

	/* this will put all unused low memory onto the freelists */
L
Laura Abbott 已提交
484
	free_unused_memmap();
485
	free_all_bootmem();
L
Linus Torvalds 已提交
486 487 488

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
489
	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
L
Linus Torvalds 已提交
490 491
#endif

492
	free_highpages();
493

494
	mem_init_print_info(NULL);
L
Linus Torvalds 已提交
495

496 497 498 499
#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

R
Russell King 已提交
500
	pr_notice("Virtual kernel memory layout:\n"
501
			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
502 503 504 505
#ifdef CONFIG_HAVE_TCM
			"    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
506 507 508 509 510 511
			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#ifdef CONFIG_HIGHMEM
			"    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
512
#ifdef CONFIG_MODULES
513
			"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
514
#endif
R
Russell King 已提交
515 516 517 518
			"      .text : 0x%p" " - 0x%p" "   (%4td kB)\n"
			"      .init : 0x%p" " - 0x%p" "   (%4td kB)\n"
			"      .data : 0x%p" " - 0x%p" "   (%4td kB)\n"
			"       .bss : 0x%p" " - 0x%p" "   (%4td kB)\n",
519 520 521

			MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
				(PAGE_SIZE)),
522
#ifdef CONFIG_HAVE_TCM
523 524
			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
525
#endif
M
Mark Salter 已提交
526
			MLK(FIXADDR_START, FIXADDR_END),
527
			MLM(VMALLOC_START, VMALLOC_END),
528 529 530 531 532
			MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
			MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
				(PAGE_SIZE)),
#endif
533
#ifdef CONFIG_MODULES
534
			MLM(MODULES_VADDR, MODULES_END),
535
#endif
536 537

			MLK_ROUNDUP(_text, _etext),
538
			MLK_ROUNDUP(__init_begin, __init_end),
539 540
			MLK_ROUNDUP(_sdata, _edata),
			MLK_ROUNDUP(__bss_start, __bss_stop));
541 542 543 544 545

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

546 547 548 549 550 551 552 553 554 555 556 557 558 559
	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(TASK_SIZE				> MODULES_VADDR);
	BUG_ON(TASK_SIZE 				> MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE	> PAGE_OFFSET);
#endif

560
	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
L
Linus Torvalds 已提交
561 562 563 564 565 566 567 568 569 570
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get
		 * anywhere without overcommit, so turn
		 * it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

#ifdef CONFIG_ARM_KERNMEM_PERMS
/*
 * A section-granular permission update: clear the bits in @mask over
 * [start, end) and apply either @prot (to protect) or @clear (to undo).
 */
struct section_perm {
	unsigned long start;
	unsigned long end;
	pmdval_t mask;
	pmdval_t prot;
	pmdval_t clear;
};

/* Regions to mark non-executable (XN) once init is done. */
static struct section_perm nx_perms[] = {
	/* Make pages tables, etc before _stext RW (set NX). */
	{
		.start	= PAGE_OFFSET,
		.end	= (unsigned long)_stext,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make init RW (set NX). */
	{
		.start	= (unsigned long)__init_begin,
		.end	= (unsigned long)_sdata,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
#ifdef CONFIG_DEBUG_RODATA
	/* Make rodata NX (set RO in ro_perms below). */
	{
		.start  = (unsigned long)__start_rodata,
		.end    = (unsigned long)__init_begin,
		.mask   = ~PMD_SECT_XN,
		.prot   = PMD_SECT_XN,
	},
#endif
};

#ifdef CONFIG_DEBUG_RODATA
static struct section_perm ro_perms[] = {
	/* Make kernel code and rodata RX (set RO). */
	{
		.start  = (unsigned long)_stext,
		.end    = (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
		.mask   = ~L_PMD_SECT_RDONLY,
		.prot   = L_PMD_SECT_RDONLY,
#else
		.mask   = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
		.prot   = PMD_SECT_APX | PMD_SECT_AP_WRITE,
		.clear  = PMD_SECT_AP_WRITE,
#endif
	},
};
#endif

624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686
/*
 * Updates section permissions only for the current mm (sections are
 * copied into each mm). During startup, this is the init_mm. Is only
 * safe to be called with preemption disabled, as under stop_machine().
 */
static inline void section_update(unsigned long addr, pmdval_t mask,
				  pmdval_t prot)
{
	struct mm_struct *mm;
	pmd_t *pmd;

	mm = current->active_mm;
	pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);

#ifdef CONFIG_ARM_LPAE
	pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
	if (addr & SECTION_SIZE)
		pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
	else
		pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
	flush_pmd_entry(pmd);
	local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}

/* Make sure extended page tables are in use. */
static inline bool arch_has_strict_perms(void)
{
	if (cpu_architecture() < CPU_ARCH_ARMv6)
		return false;

	return !!(get_cr() & CR_XP);
}

#define set_section_perms(perms, field)	{				\
	size_t i;							\
	unsigned long addr;						\
									\
	if (!arch_has_strict_perms())					\
		return;							\
									\
	for (i = 0; i < ARRAY_SIZE(perms); i++) {			\
		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||	\
		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {		\
			pr_err("BUG: section %lx-%lx not aligned to %lx\n", \
				perms[i].start, perms[i].end,		\
				SECTION_SIZE);				\
			continue;					\
		}							\
									\
		for (addr = perms[i].start;				\
		     addr < perms[i].end;				\
		     addr += SECTION_SIZE)				\
			section_update(addr, perms[i].mask,		\
				       perms[i].field);			\
	}								\
}

static inline void fix_kernmem_perms(void)
{
	set_section_perms(nx_perms, prot);
}
#ifdef CONFIG_DEBUG_RODATA
/* Make kernel text and rodata read-only. */
void mark_rodata_ro(void)
{
	set_section_perms(ro_perms, prot);
}

/* Temporarily make kernel text writable (e.g. for code patching). */
void set_kernel_text_rw(void)
{
	set_section_perms(ro_perms, clear);
}

/* Restore the read-only protection on kernel text. */
void set_kernel_text_ro(void)
{
	set_section_perms(ro_perms, prot);
}
#endif /* CONFIG_DEBUG_RODATA */

#else
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_ARM_KERNMEM_PERMS */

/* Poison and release the TCM link area, when TCM is configured. */
void free_tcmmem(void)
{
#ifdef CONFIG_HAVE_TCM
	extern char __tcm_start, __tcm_end;

	poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
	free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
#endif
}

void free_initmem(void)
{
	fix_kernmem_perms();
	free_tcmmem();
723

724
	poison_init_mem(__init_begin, __init_end - __init_begin);
N
Nicolas Pitre 已提交
725
	if (!machine_is_integrator() && !machine_is_cintegrator())
726
		free_initmem_default(-1);
L
Linus Torvalds 已提交
727 728 729 730 731 732 733 734
}

#ifdef CONFIG_BLK_DEV_INITRD

/* Non-zero when "keepinitrd" was given on the command line. */
static int keep_initrd;

/*
 * Poison and free the initrd pages, unless "keepinitrd" was requested.
 * Start/end are widened to page boundaries only when they match the
 * recorded initrd bounds.
 */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd) {
		if (start == initrd_start)
			start = round_down(start, PAGE_SIZE);
		if (end == initrd_end)
			end = round_up(end, PAGE_SIZE);

		poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
		free_reserved_area((void *)start, (void *)end, -1, "initrd");
	}
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif