/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
16
#include <linux/export.h>
L
Linus Torvalds 已提交
17 18
#include <linux/nodemask.h>
#include <linux/initrd.h>
G
Grant Likely 已提交
19
#include <linux/of_fdt.h>
20
#include <linux/highmem.h>
21
#include <linux/gfp.h>
R
Russell King 已提交
22
#include <linux/memblock.h>
23
#include <linux/dma-contiguous.h>
24
#include <linux/sizes.h>
L
Linus Torvalds 已提交
25

26
#include <asm/cp15.h>
L
Linus Torvalds 已提交
27
#include <asm/mach-types.h>
28
#include <asm/memblock.h>
29
#include <asm/prom.h>
R
Russell King 已提交
30
#include <asm/sections.h>
L
Linus Torvalds 已提交
31
#include <asm/setup.h>
32
#include <asm/system_info.h>
L
Linus Torvalds 已提交
33
#include <asm/tlb.h>
34
#include <asm/fixmap.h>
L
Linus Torvalds 已提交
35 36 37 38

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

39 40
#include "mm.h"

41 42 43 44 45 46 47 48
#ifdef CONFIG_CPU_CP15_MMU
/*
 * Clear the requested bits in the cached copy of the CP15 control
 * register and return the updated value.
 */
unsigned long __init __clear_cr(unsigned long mask)
{
	cr_alignment &= ~mask;
	return cr_alignment;
}
#endif

49
/* Physical location/size of the initrd, from "initrd=" or ATAGs/FDT. */
static phys_addr_t phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;

52
static int __init early_initrd(char *p)
53
{
54 55
	phys_addr_t start;
	unsigned long size;
56
	char *endp;
57

58 59 60
	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);
61 62 63 64

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
65
	return 0;
66
}
67
early_param("initrd", early_initrd);
68 69 70

/*
 * Legacy ATAG_INITRD handler: the tag carries a *virtual* start address,
 * so convert it to physical before recording it.
 *
 * Note: the deprecation warning is kept on a single line per kernel
 * coding style (user-visible strings should not be split), so it can
 * be found with grep.
 */
static int __init parse_tag_initrd(const struct tag *tag)
{
	pr_warn("ATAG_INITRD is deprecated; please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

/*
 * ATAG_INITRD2 handler: unlike ATAG_INITRD, the start address here is
 * already physical, so it is recorded without conversion.
 */
static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
L
Linus Torvalds 已提交
88 89

/*
90 91 92
 * This keeps memory configuration data used by a couple memory
 * initialization functions, as well as show_mem() for the skipping
 * of holes in the memory map.  It is populated by arm_add_memory().
L
Linus Torvalds 已提交
93
 */
94
void show_mem(unsigned int filter)
L
Linus Torvalds 已提交
95 96
{
	int free = 0, total = 0, reserved = 0;
L
Laura Abbott 已提交
97 98
	int shared = 0, cached = 0, slab = 0;
	struct memblock_region *reg;
L
Linus Torvalds 已提交
99 100

	printk("Mem-info:\n");
101
	show_free_areas(filter);
R
Russell King 已提交
102

L
Laura Abbott 已提交
103
	for_each_memblock (memory, reg) {
R
Russell King 已提交
104 105 106
		unsigned int pfn1, pfn2;
		struct page *page, *end;

L
Laura Abbott 已提交
107 108
		pfn1 = memblock_region_memory_base_pfn(reg);
		pfn2 = memblock_region_memory_end_pfn(reg);
R
Russell King 已提交
109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124

		page = pfn_to_page(pfn1);
		end  = pfn_to_page(pfn2 - 1) + 1;

		do {
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
L
Laura Abbott 已提交
125 126 127
			pfn1++;
			page = pfn_to_page(pfn1);
		} while (pfn1 < pfn2);
L
Linus Torvalds 已提交
128 129 130 131 132 133 134 135 136 137
	}

	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}

138
/*
 * Derive the pfn limits of the system from memblock: lowest RAM pfn,
 * highest directly-mapped (lowmem) pfn, and highest RAM pfn overall.
 */
static void __init find_limits(unsigned long *min, unsigned long *max_low,
			       unsigned long *max_high)
{
	/* memblock's current limit marks the top of lowmem */
	*max_low = PFN_DOWN(memblock_get_current_limit());
	*min = PFN_UP(memblock_start_of_DRAM());
	*max_high = PFN_DOWN(memblock_end_of_DRAM());
}

146
#ifdef CONFIG_ZONE_DMA

/* Size of the DMA zone, set from the machine descriptor in setup_dma_zone(). */
phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA.  The default here places no restriction on DMA
 * allocations.  This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;
/* arm_dma_limit expressed as a page frame number */
unsigned long arm_dma_pfn_limit;
/*
 * Split the first zone so that the low dma_size pages become ZONE_DMA
 * and the remainder becomes ZONE_NORMAL.  No-op when all of lowmem
 * already fits within the DMA limit.
 */
static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
	unsigned long dma_size)
{
	if (size[0] <= dma_size)
		return;

	size[ZONE_NORMAL] = size[0] - dma_size;
	size[ZONE_DMA] = dma_size;
	hole[ZONE_NORMAL] = hole[0];
	/* all holes are accounted to ZONE_NORMAL above */
	hole[ZONE_DMA] = 0;
}
#endif

173
/*
 * Establish the DMA addressing limit for this machine.  If the machine
 * descriptor specifies a DMA zone size, the limit covers exactly that
 * much RAM from PHYS_OFFSET; otherwise DMA is unrestricted (32-bit).
 */
void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		arm_dma_zone_size = mdesc->dma_zone_size;
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else
		arm_dma_limit = 0xffffffff;
	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}

185
/*
 * Compute per-zone spans and hole sizes from the memblock memory map
 * and hand them to free_area_init_node().  Lowmem goes into zone 0,
 * anything above max_low into ZONE_HIGHMEM, and zone 0 is optionally
 * split by arm_adjust_dma_zone() when a DMA zone is configured.
 */
static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	struct memblock_region *reg;

	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

	/*
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		/* subtract the lowmem portion of this bank from the holes */
		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
#ifdef CONFIG_HIGHMEM
		/* and likewise the highmem portion */
		if (end > max_low) {
			unsigned long high_start = max(start, max_low);
			zhole_size[ZONE_HIGHMEM] -= end - high_start;
		}
#endif
	}

#ifdef CONFIG_ZONE_DMA
	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	if (arm_dma_zone_size)
		arm_adjust_dma_zone(zone_size, zhole_size,
			arm_dma_zone_size >> PAGE_SHIFT);
#endif

	free_area_init_node(0, zone_size, min, zhole_size);
}

240
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
241 242
int pfn_valid(unsigned long pfn)
{
243
	return memblock_is_memory(__pfn_to_phys(pfn));
244 245
}
EXPORT_SYMBOL(pfn_valid);
246
#endif
247

248
#ifndef CONFIG_SPARSEMEM
/* Nothing to do without sparsemem. */
static void __init arm_memory_present(void)
{
}
#else
/*
 * Tell the sparsemem code which pfn ranges are present, one call per
 * memblock region, all on node 0 (ARM here is single-node).
 */
static void __init arm_memory_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		memory_present(0, memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
}
#endif

263 264
/* Stealing is only allowed until arm_memblock_init() has finished. */
static bool arm_memblock_steal_permitted = true;

265
/*
 * Permanently remove a chunk of physical memory from the kernel's view.
 * The memory is allocated from memblock, then freed *and* removed so it
 * never appears in the memory map.  Must be called before
 * arm_memblock_init() completes (enforced by the BUG_ON).
 */
phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t phys;

	BUG_ON(!arm_memblock_steal_permitted);

	phys = memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
	memblock_free(phys, size);
	memblock_remove(phys, size);

	return phys;
}

L
Laura Abbott 已提交
278
/*
 * Reserve all boot-time memory regions in memblock: the kernel image,
 * the initrd (after validating it), MMU page tables, machine-specific
 * areas, FDT-described reservations and the CMA region.  Once done,
 * further arm_memblock_steal() calls are forbidden.
 */
void __init arm_memblock_init(const struct machine_desc *mdesc)
{
	/* Register the kernel text, kernel data and initrd with memblock. */
#ifdef CONFIG_XIP_KERNEL
	/* XIP: text executes from ROM, only data onwards lives in RAM */
	memblock_reserve(__pa(_sdata), _end - _sdata);
#else
	memblock_reserve(__pa(_stext), _end - _stext);
#endif
#ifdef CONFIG_BLK_DEV_INITRD
	/* FDT scan will populate initrd_start */
	if (initrd_start && !phys_initrd_size) {
		phys_initrd_start = __virt_to_phys(initrd_start);
		phys_initrd_size = initrd_end - initrd_start;
	}
	/* cleared here; set again below only if the initrd checks out */
	initrd_start = initrd_end = 0;
	if (phys_initrd_size &&
	    !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
		       (u64)phys_initrd_start, phys_initrd_size);
		phys_initrd_start = phys_initrd_size = 0;
	}
	if (phys_initrd_size &&
	    memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
		       (u64)phys_initrd_start, phys_initrd_size);
		phys_initrd_start = phys_initrd_size = 0;
	}
	if (phys_initrd_size) {
		memblock_reserve(phys_initrd_start, phys_initrd_size);

		/* Now convert initrd to virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}
#endif

	arm_mm_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	early_init_fdt_scan_reserved_mem();

	/* reserve memory for DMA contiguous allocations */
	dma_contiguous_reserve(arm_dma_limit);

	arm_memblock_steal_permitted = false;
	memblock_dump_all();
}

329
/*
 * Boot-time memory initialisation: determine pfn limits, register
 * present memory with sparsemem, and initialise the zone structures.
 */
void __init bootmem_init(void)
{
	unsigned long min, max_low, max_high;

	memblock_allow_resize();
	max_low = max_high = 0;

	find_limits(&min, &max_low, &max_high);

	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
	arm_memory_present();

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	zone_sizes_init(min, max_low, max_high);

	/*
	 * This doesn't seem to be used by the Linux memory manager any
	 * more, but is used by ll_rw_block.  If we can get rid of it, we
	 * also get rid of some of the stuff above as well.
	 */
	min_low_pfn = min;
	max_low_pfn = max_low;
	max_pfn = max_high;
}
L
Linus Torvalds 已提交
365

366 367 368 369 370 371 372
/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 *
 * Writes the poison pattern one 32-bit word at a time; any trailing
 * 1-3 bytes of a non-word-multiple count are left untouched.
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = (u32 *)s;
	/*
	 * Loop while at least one whole word remains.  The previous
	 * "count != 0" test relied on count being a multiple of 4;
	 * otherwise the unsigned subtraction wrapped past zero and the
	 * loop scribbled far beyond the buffer.
	 */
	for (; count >= 4; count -= 4)
		*p++ = 0xe7fddef0;
}

377
/*
 * Release the struct page entries covering pfns [start_pfn, end_pfn)
 * back to memblock.  Only whole pages of the memmap array are freed,
 * hence the inward rounding of the physical range.
 */
static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	phys_addr_t pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		memblock_free_early(pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, prev_end = 0;
	struct memblock_region *reg;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_memblock(memory, reg) {
		start = memblock_region_memory_base_pfn(reg);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		start = min(start,
				 ALIGN(prev_end, PAGES_PER_SECTION));
#else
		/*
		 * Align down here since the VM subsystem insists that the
		 * memmap entries are valid from the bank start aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		start = round_down(start, MAX_ORDER_NR_PAGES);
#endif
		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(memblock_region_memory_end_pfn(reg),
				 MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	/* free the tail of the final section, if it is partially used */
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end,
			    ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}

457 458 459
#ifdef CONFIG_HIGHMEM
/* Hand every highmem page in [pfn, end) back to the page allocator. */
static inline void free_area_high(unsigned long pfn, unsigned long end)
{
	while (pfn < end) {
		free_highmem_page(pfn_to_page(pfn));
		pfn++;
	}
}
#endif

465 466 467
/*
 * Release all highmem pages to the page allocator, skipping any pfn
 * ranges that memblock has marked reserved.
 */
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn;
	struct memblock_region *mem, *res;

	/* set highmem page free */
	for_each_memblock(memory, mem) {
		unsigned long start = memblock_region_memory_base_pfn(mem);
		unsigned long end = memblock_region_memory_end_pfn(mem);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		/* Find and exclude any reserved regions */
		for_each_memblock(reserved, res) {
			unsigned long res_start, res_end;

			res_start = memblock_region_reserved_base_pfn(res);
			res_end = memblock_region_reserved_end_pfn(res);

			/* clamp the reservation to [start, end) */
			if (res_end < start)
				continue;
			if (res_start < start)
				res_start = start;
			if (res_start > end)
				res_start = end;
			if (res_end > end)
				res_end = end;
			/* free the gap before this reservation, if any */
			if (res_start != start)
				free_area_high(start, res_start);
			start = res_end;
			if (start == end)
				break;
		}

		/* And now free anything which remains */
		if (start < end)
			free_area_high(start, end);
	}
#endif
}

L
Linus Torvalds 已提交
513 514 515 516 517 518 519
/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
#ifdef CONFIG_HAVE_TCM
	/* These pointers are filled in on TCM detection */
	extern u32 dtcm_end;
	extern u32 itcm_end;
#endif

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

	/* this will put all unused low memory onto the freelists */
	free_unused_memmap();
	free_all_bootmem();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif

	/* likewise, release all highmem pages to the allocator */
	free_highpages();

	mem_init_print_info(NULL);

	/* helpers for the layout printout below: base, top, size in kB/MB */
#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

	pr_notice("Virtual kernel memory layout:\n"
			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HAVE_TCM
			"    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#ifdef CONFIG_HIGHMEM
			"    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
#ifdef CONFIG_MODULES
			"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
			"      .text : 0x%p" " - 0x%p" "   (%4td kB)\n"
			"      .init : 0x%p" " - 0x%p" "   (%4td kB)\n"
			"      .data : 0x%p" " - 0x%p" "   (%4td kB)\n"
			"       .bss : 0x%p" " - 0x%p" "   (%4td kB)\n",

			MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
				(PAGE_SIZE)),
#ifdef CONFIG_HAVE_TCM
			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
			MLK(FIXADDR_START, FIXADDR_END),
			MLM(VMALLOC_START, VMALLOC_END),
			MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
			MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
				(PAGE_SIZE)),
#endif
#ifdef CONFIG_MODULES
			MLM(MODULES_VADDR, MODULES_END),
#endif

			MLK_ROUNDUP(_text, _etext),
			MLK_ROUNDUP(__init_begin, __init_end),
			MLK_ROUNDUP(_sdata, _edata),
			MLK_ROUNDUP(__bss_start, __bss_stop));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(TASK_SIZE				> MODULES_VADDR);
	BUG_ON(TASK_SIZE 				> MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE	> PAGE_OFFSET);
#endif

	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get
		 * anywhere without overcommit, so turn
		 * it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

616 617 618 619 620 621
#ifdef CONFIG_ARM_KERNMEM_PERMS
/* Describes one permission change to apply to a virtual address range. */
struct section_perm {
	unsigned long start;	/* first address, section aligned */
	unsigned long end;	/* one past the last address, section aligned */
	pmdval_t mask;		/* bits to preserve in the section entry */
	pmdval_t prot;		/* bits to set when applying protection */
	pmdval_t clear;		/* bits to set when reverting protection */
};

static struct section_perm nx_perms[] = {
	/* Make pages tables, etc before _stext RW (set NX). */
	{
		.start	= PAGE_OFFSET,
		.end	= (unsigned long)_stext,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make init RW (set NX). */
	{
		.start	= (unsigned long)__init_begin,
		.end	= (unsigned long)_sdata,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
#ifdef CONFIG_DEBUG_RODATA
	/* Make rodata NX (set RO in ro_perms below). */
	{
		.start  = (unsigned long)__start_rodata,
		.end    = (unsigned long)__init_begin,
		.mask   = ~PMD_SECT_XN,
		.prot   = PMD_SECT_XN,
	},
#endif
};

#ifdef CONFIG_DEBUG_RODATA
static struct section_perm ro_perms[] = {
	/* Make kernel code and rodata RX (set RO). */
	{
		.start  = (unsigned long)_stext,
		.end    = (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
		.mask   = ~L_PMD_SECT_RDONLY,
		.prot   = L_PMD_SECT_RDONLY,
#else
		.mask   = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
		.prot   = PMD_SECT_APX | PMD_SECT_AP_WRITE,
		.clear  = PMD_SECT_AP_WRITE,
#endif
	},
};
#endif

669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731
/*
 * Updates section permissions only for the current mm (sections are
 * copied into each mm). During startup, this is the init_mm. Is only
 * safe to be called with preemption disabled, as under stop_machine().
 */
static inline void section_update(unsigned long addr, pmdval_t mask,
				  pmdval_t prot)
{
	struct mm_struct *mm;
	pmd_t *pmd;

	mm = current->active_mm;
	pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);

#ifdef CONFIG_ARM_LPAE
	pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
	/* non-LPAE: a pmd covers two sections; pick the right half */
	if (addr & SECTION_SIZE)
		pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
	else
		pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
	/* make the change visible to the hardware walker and the TLB */
	flush_pmd_entry(pmd);
	local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}

/* Make sure extended page tables are in use. */
static inline bool arch_has_strict_perms(void)
{
	/* pre-v6 CPUs lack the access permission bits we rely on */
	if (cpu_architecture() < CPU_ARCH_ARMv6)
		return false;

	return !!(get_cr() & CR_XP);
}

/*
 * Apply the given permission table: for each properly section-aligned
 * entry, update every section in [start, end) with entry.mask and the
 * named field (prot or clear).  A macro rather than a function so the
 * field name can be passed; bails out early on pre-v6 hardware.
 */
#define set_section_perms(perms, field)	{				\
	size_t i;							\
	unsigned long addr;						\
									\
	if (!arch_has_strict_perms())					\
		return;							\
									\
	for (i = 0; i < ARRAY_SIZE(perms); i++) {			\
		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||	\
		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {		\
			pr_err("BUG: section %lx-%lx not aligned to %lx\n", \
				perms[i].start, perms[i].end,		\
				SECTION_SIZE);				\
			continue;					\
		}							\
									\
		for (addr = perms[i].start;				\
		     addr < perms[i].end;				\
		     addr += SECTION_SIZE)				\
			section_update(addr, perms[i].mask,		\
				       perms[i].field);			\
	}								\
}

/* Mark non-text kernel regions (pre-_stext, init) non-executable. */
static inline void fix_kernmem_perms(void)
{
	set_section_perms(nx_perms, prot);
}
732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749

#ifdef CONFIG_DEBUG_RODATA
/* Make kernel text and rodata read-only (applies ro_perms.prot). */
void mark_rodata_ro(void)
{
	set_section_perms(ro_perms, prot);
}

/* Temporarily make kernel text writable (applies ro_perms.clear). */
void set_kernel_text_rw(void)
{
	set_section_perms(ro_perms, clear);
}

/* Restore the read-only protection on kernel text. */
void set_kernel_text_ro(void)
{
	set_section_perms(ro_perms, prot);
}
#endif /* CONFIG_DEBUG_RODATA */

#else
/* No strict kernel memory permissions configured: nothing to fix up. */
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_ARM_KERNMEM_PERMS */

/*
 * Poison and release the TCM link-time region, if this kernel was
 * built with TCM support; otherwise a no-op.
 */
void free_tcmmem(void)
{
#ifdef CONFIG_HAVE_TCM
	extern char __tcm_start, __tcm_end;

	poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
	free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
#endif
}

/*
 * Called once boot is complete: lock down kernel memory permissions,
 * then poison and free the __init text/data.  Integrator platforms
 * keep their init memory (it overlaps something they still need).
 */
void free_initmem(void)
{
	fix_kernmem_perms();
	free_tcmmem();

	poison_init_mem(__init_begin, __init_end - __init_begin);
	if (!machine_is_integrator() && !machine_is_cintegrator())
		free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD

/* Set by the "keepinitrd" boot parameter to skip freeing the initrd. */
static int keep_initrd;

/*
 * Poison and free the initrd pages unless "keepinitrd" was given.
 * The range is widened to whole pages only when it matches the
 * recorded initrd boundaries exactly.
 */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd) {
		if (start == initrd_start)
			start = round_down(start, PAGE_SIZE);
		if (end == initrd_end)
			end = round_up(end, PAGE_SIZE);

		poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
		free_reserved_area((void *)start, (void *)end, -1, "initrd");
	}
}

/* Handle the "keepinitrd" boot parameter: leave the initrd in memory. */
static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif