/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>

#include <asm/cp15.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"
#ifdef CONFIG_CPU_CP15_MMU
/*
 * Clear the given bits in the cached control register value
 * (cr_alignment) and return the updated value.
 */
unsigned long __init __clear_cr(unsigned long mask)
{
	cr_alignment = cr_alignment & ~mask;
	return cr_alignment;
}
#endif

49
static phys_addr_t phys_initrd_start __initdata = 0;
50 51
static unsigned long phys_initrd_size __initdata = 0;

52
static int __init early_initrd(char *p)
53
{
54 55
	phys_addr_t start;
	unsigned long size;
56
	char *endp;
57

58 59 60
	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);
61 62 63 64

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
65
	return 0;
66
}
67
early_param("initrd", early_initrd);
68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87

/*
 * Legacy ATAG_INITRD handler: the tag carries a *virtual* address,
 * which is converted to physical before being recorded.
 */
static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

/*
 * ATAG_INITRD2 handler: the tag value is stored directly, i.e. it is
 * already a physical address (no __virt_to_phys conversion).
 */
static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
L
Linus Torvalds 已提交
88 89

/*
90 91 92
 * This keeps memory configuration data used by a couple memory
 * initialization functions, as well as show_mem() for the skipping
 * of holes in the memory map.  It is populated by arm_add_memory().
L
Linus Torvalds 已提交
93
 */
94
void show_mem(unsigned int filter)
L
Linus Torvalds 已提交
95 96
{
	int free = 0, total = 0, reserved = 0;
L
Laura Abbott 已提交
97 98
	int shared = 0, cached = 0, slab = 0;
	struct memblock_region *reg;
L
Linus Torvalds 已提交
99 100

	printk("Mem-info:\n");
101
	show_free_areas(filter);
R
Russell King 已提交
102

L
Laura Abbott 已提交
103
	for_each_memblock (memory, reg) {
R
Russell King 已提交
104 105 106
		unsigned int pfn1, pfn2;
		struct page *page, *end;

L
Laura Abbott 已提交
107 108
		pfn1 = memblock_region_memory_base_pfn(reg);
		pfn2 = memblock_region_memory_end_pfn(reg);
R
Russell King 已提交
109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124

		page = pfn_to_page(pfn1);
		end  = pfn_to_page(pfn2 - 1) + 1;

		do {
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
L
Laura Abbott 已提交
125 126 127
			pfn1++;
			page = pfn_to_page(pfn1);
		} while (pfn1 < pfn2);
L
Linus Torvalds 已提交
128 129 130 131 132 133 134 135 136 137
	}

	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}

138
static void __init find_limits(unsigned long *min, unsigned long *max_low,
139
			       unsigned long *max_high)
R
Russell King 已提交
140
{
L
Laura Abbott 已提交
141 142 143
	*max_low = PFN_DOWN(memblock_get_current_limit());
	*min = PFN_UP(memblock_start_of_DRAM());
	*max_high = PFN_DOWN(memblock_end_of_DRAM());
R
Russell King 已提交
144 145
}

#ifdef CONFIG_ZONE_DMA

phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA.  The default here places no restriction on DMA
 * allocations.  This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;

/*
 * Carve the first dma_size pages out of zone 0 into ZONE_DMA, moving
 * the remainder (and its hole accounting) to ZONE_NORMAL.  No-op when
 * the zone is already no larger than the DMA area.
 */
static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
	unsigned long dma_size)
{
	if (size[0] <= dma_size)
		return;

	size[ZONE_NORMAL] = size[0] - dma_size;
	size[ZONE_DMA] = dma_size;
	hole[ZONE_NORMAL] = hole[0];
	hole[ZONE_DMA] = 0;
}
#endif

173
void __init setup_dma_zone(const struct machine_desc *mdesc)
174 175 176 177
{
#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		arm_dma_zone_size = mdesc->dma_zone_size;
178
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
179 180
	} else
		arm_dma_limit = 0xffffffff;
181
	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
182 183 184
#endif
}

185
static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
186
	unsigned long max_high)
187 188
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
189
	struct memblock_region *reg;
190

R
Russell King 已提交
191
	/*
R
Russell King 已提交
192
	 * initialise the zones.
R
Russell King 已提交
193 194 195 196
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
R
Russell King 已提交
197 198 199
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
R
Russell King 已提交
200
	 */
R
Russell King 已提交
201 202 203 204
	zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif
R
Russell King 已提交
205 206

	/*
R
Russell King 已提交
207 208
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
R
Russell King 已提交
209
	 */
R
Russell King 已提交
210
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
211 212 213 214 215 216 217 218
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
R
Russell King 已提交
219
#ifdef CONFIG_HIGHMEM
220 221 222 223
		if (end > max_low) {
			unsigned long high_start = max(start, max_low);
			zhole_size[ZONE_HIGHMEM] -= end - high_start;
		}
R
Russell King 已提交
224 225
#endif
	}
R
Russell King 已提交
226

227
#ifdef CONFIG_ZONE_DMA
R
Russell King 已提交
228 229 230 231
	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
232
	if (arm_dma_zone_size)
233 234
		arm_adjust_dma_zone(zone_size, zhole_size,
			arm_dma_zone_size >> PAGE_SHIFT);
235
#endif
R
Russell King 已提交
236

R
Russell King 已提交
237
	free_area_init_node(0, zone_size, min, zhole_size);
L
Linus Torvalds 已提交
238 239
}

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
/* A PFN is valid iff its physical address lies inside a memblock region. */
int pfn_valid(unsigned long pfn)
{
	return memblock_is_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif
247

248
#ifndef CONFIG_SPARSEMEM
249
static void __init arm_memory_present(void)
250 251 252
{
}
#else
253
static void __init arm_memory_present(void)
254
{
255 256
	struct memblock_region *reg;

257
	for_each_memblock(memory, reg)
258 259
		memory_present(0, memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
260
}
261 262
#endif

263 264
static bool arm_memblock_steal_permitted = true;

265
phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
266 267 268 269 270
{
	phys_addr_t phys;

	BUG_ON(!arm_memblock_steal_permitted);

271
	phys = memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
272 273 274 275 276 277
	memblock_free(phys, size);
	memblock_remove(phys, size);

	return phys;
}

L
Laura Abbott 已提交
278
void __init arm_memblock_init(const struct machine_desc *mdesc)
R
Russell King 已提交
279 280 281
{
	/* Register the kernel text, kernel data and initrd with memblock. */
#ifdef CONFIG_XIP_KERNEL
282
	memblock_reserve(__pa(_sdata), _end - _sdata);
R
Russell King 已提交
283 284 285 286
#else
	memblock_reserve(__pa(_stext), _end - _stext);
#endif
#ifdef CONFIG_BLK_DEV_INITRD
287
	/* FDT scan will populate initrd_start */
288
	if (initrd_start && !phys_initrd_size) {
289 290 291
		phys_initrd_start = __virt_to_phys(initrd_start);
		phys_initrd_size = initrd_end - initrd_start;
	}
292
	initrd_start = initrd_end = 0;
293 294
	if (phys_initrd_size &&
	    !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
295 296
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
		       (u64)phys_initrd_start, phys_initrd_size);
297 298
		phys_initrd_start = phys_initrd_size = 0;
	}
299 300
	if (phys_initrd_size &&
	    memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
301 302
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
		       (u64)phys_initrd_start, phys_initrd_size);
303 304
		phys_initrd_start = phys_initrd_size = 0;
	}
R
Russell King 已提交
305 306 307 308 309 310 311 312 313 314 315
	if (phys_initrd_size) {
		memblock_reserve(phys_initrd_start, phys_initrd_size);

		/* Now convert initrd to virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}
#endif

	arm_mm_memblock_reserve();

316 317 318 319
	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

320 321
	early_init_fdt_scan_reserved_mem();

322 323 324 325 326 327
	/*
	 * reserve memory for DMA contigouos allocations,
	 * must come from DMA area inside low memory
	 */
	dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));

328
	arm_memblock_steal_permitted = false;
R
Russell King 已提交
329 330 331
	memblock_dump_all();
}

332
void __init bootmem_init(void)
L
Linus Torvalds 已提交
333
{
R
Russell King 已提交
334
	unsigned long min, max_low, max_high;
L
Linus Torvalds 已提交
335

336
	memblock_allow_resize();
R
Russell King 已提交
337 338
	max_low = max_high = 0;

339
	find_limits(&min, &max_low, &max_high);
R
Russell King 已提交
340

R
Russell King 已提交
341 342 343 344
	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
345
	arm_memory_present();
L
Linus Torvalds 已提交
346

347 348 349 350 351 352
	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
R
Russell King 已提交
353
	 * Now free the memory - free_area_init_node needs
354 355 356
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
357
	zone_sizes_init(min, max_low, max_high);
358

R
Russell King 已提交
359 360 361 362 363
	/*
	 * This doesn't seem to be used by the Linux memory manager any
	 * more, but is used by ll_rw_block.  If we can get rid of it, we
	 * also get rid of some of the stuff above as well.
	 */
364 365 366
	min_low_pfn = min;
	max_low_pfn = max_low;
	max_pfn = max_high;
R
Russell King 已提交
367
}
L
Linus Torvalds 已提交
368

369 370 371 372 373 374 375
/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = (u32 *)s;
376
	for (; count != 0; count -= 4)
377 378 379
		*p++ = 0xe7fddef0;
}

380
static inline void
R
Russell King 已提交
381
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
382 383
{
	struct page *start_pg, *end_pg;
384
	phys_addr_t pg, pgend;
385 386 387 388

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
389
	start_pg = pfn_to_page(start_pfn - 1) + 1;
390
	end_pg = pfn_to_page(end_pfn - 1) + 1;
391 392 393 394 395

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
396 397
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;
398 399 400 401 402 403

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
404
		memblock_free_early(pg, pgend - pg);
405 406 407 408 409
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
L
Laura Abbott 已提交
410
static void __init free_unused_memmap(void)
411
{
L
Laura Abbott 已提交
412 413
	unsigned long start, prev_end = 0;
	struct memblock_region *reg;
414 415

	/*
416 417
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
418
	 */
L
Laura Abbott 已提交
419 420
	for_each_memblock(memory, reg) {
		start = memblock_region_memory_base_pfn(reg);
421

422 423 424 425 426
#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
L
Laura Abbott 已提交
427 428
		start = min(start,
				 ALIGN(prev_end, PAGES_PER_SECTION));
429 430 431 432 433 434
#else
		/*
		 * Align down here since the VM subsystem insists that the
		 * memmap entries are valid from the bank start aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
L
Laura Abbott 已提交
435
		start = round_down(start, MAX_ORDER_NR_PAGES);
436
#endif
437 438 439 440
		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
L
Laura Abbott 已提交
441 442
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);
443

444 445 446 447 448
		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
L
Laura Abbott 已提交
449 450
		prev_end = ALIGN(memblock_region_memory_end_pfn(reg),
				 MAX_ORDER_NR_PAGES);
451
	}
452 453

#ifdef CONFIG_SPARSEMEM
L
Laura Abbott 已提交
454 455 456
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end,
			    ALIGN(prev_end, PAGES_PER_SECTION));
457
#endif
458 459
}

#ifdef CONFIG_HIGHMEM
/* Hand each highmem page in [pfn, end) to the page allocator. */
static inline void free_area_high(unsigned long pfn, unsigned long end)
{
	for (; pfn < end; pfn++)
		free_highmem_page(pfn_to_page(pfn));
}
#endif

468 469 470
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
471
	unsigned long max_low = max_low_pfn;
472
	struct memblock_region *mem, *res;
473 474

	/* set highmem page free */
475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502
	for_each_memblock(memory, mem) {
		unsigned long start = memblock_region_memory_base_pfn(mem);
		unsigned long end = memblock_region_memory_end_pfn(mem);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		/* Find and exclude any reserved regions */
		for_each_memblock(reserved, res) {
			unsigned long res_start, res_end;

			res_start = memblock_region_reserved_base_pfn(res);
			res_end = memblock_region_reserved_end_pfn(res);

			if (res_end < start)
				continue;
			if (res_start < start)
				res_start = start;
			if (res_start > end)
				res_start = end;
			if (res_end > end)
				res_end = end;
			if (res_start != start)
503
				free_area_high(start, res_start);
504 505 506 507 508 509 510
			start = res_end;
			if (start == end)
				break;
		}

		/* And now free anything which remains */
		if (start < end)
511
			free_area_high(start, end);
512 513 514 515
	}
#endif
}

L
Linus Torvalds 已提交
516 517 518 519 520 521 522
/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
523 524 525 526 527
#ifdef CONFIG_HAVE_TCM
	/* These pointers are filled in on TCM detection */
	extern u32 dtcm_end;
	extern u32 itcm_end;
#endif
L
Linus Torvalds 已提交
528

529
	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
L
Linus Torvalds 已提交
530 531

	/* this will put all unused low memory onto the freelists */
L
Laura Abbott 已提交
532
	free_unused_memmap();
533
	free_all_bootmem();
L
Linus Torvalds 已提交
534 535 536

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
537
	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
L
Linus Torvalds 已提交
538 539
#endif

540
	free_highpages();
541

542
	mem_init_print_info(NULL);
L
Linus Torvalds 已提交
543

544 545 546 547 548 549
#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

	printk(KERN_NOTICE "Virtual kernel memory layout:\n"
			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
550 551 552 553
#ifdef CONFIG_HAVE_TCM
			"    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
554 555 556 557 558 559
			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#ifdef CONFIG_HIGHMEM
			"    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
560
#ifdef CONFIG_MODULES
561
			"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
562
#endif
563
			"      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
564
			"      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
565 566
			"      .data : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"       .bss : 0x%p" " - 0x%p" "   (%4d kB)\n",
567 568 569

			MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
				(PAGE_SIZE)),
570
#ifdef CONFIG_HAVE_TCM
571 572
			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
573
#endif
M
Mark Salter 已提交
574
			MLK(FIXADDR_START, FIXADDR_END),
575
			MLM(VMALLOC_START, VMALLOC_END),
576 577 578 579 580
			MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
			MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
				(PAGE_SIZE)),
#endif
581
#ifdef CONFIG_MODULES
582
			MLM(MODULES_VADDR, MODULES_END),
583
#endif
584 585

			MLK_ROUNDUP(_text, _etext),
586
			MLK_ROUNDUP(__init_begin, __init_end),
587 588
			MLK_ROUNDUP(_sdata, _edata),
			MLK_ROUNDUP(__bss_start, __bss_stop));
589 590 591 592 593

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

594 595 596 597 598 599 600 601 602 603 604 605 606 607
	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(TASK_SIZE				> MODULES_VADDR);
	BUG_ON(TASK_SIZE 				> MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE	> PAGE_OFFSET);
#endif

608
	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
L
Linus Torvalds 已提交
609 610 611 612 613 614 615 616 617 618
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get
		 * anywhere without overcommit, so turn
		 * it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711
#ifdef CONFIG_ARM_KERNMEM_PERMS
/* A virtual address range plus the pmd mask/prot bits to apply to it. */
struct section_perm {
	unsigned long start;
	unsigned long end;
	pmdval_t mask;
	pmdval_t prot;
};

/* Ranges to be marked non-executable (PMD_SECT_XN set). */
struct section_perm nx_perms[] = {
	/* Make pages tables, etc before _stext RW (set NX). */
	{
		.start	= PAGE_OFFSET,
		.end	= (unsigned long)_stext,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make init RW (set NX). */
	{
		.start	= (unsigned long)__init_begin,
		.end	= (unsigned long)_sdata,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
};

/*
 * Updates section permissions only for the current mm (sections are
 * copied into each mm). During startup, this is the init_mm. Is only
 * safe to be called with preemption disabled, as under stop_machine().
 */
static inline void section_update(unsigned long addr, pmdval_t mask,
				  pmdval_t prot)
{
	struct mm_struct *mm;
	pmd_t *pmd;

	mm = current->active_mm;
	pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);

#ifdef CONFIG_ARM_LPAE
	pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
	/* Non-LPAE: pick the half of the pmd pair that covers addr. */
	if (addr & SECTION_SIZE)
		pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
	else
		pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
	flush_pmd_entry(pmd);
	local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}

/* Make sure extended page tables are in use. */
static inline bool arch_has_strict_perms(void)
{
	if (cpu_architecture() < CPU_ARCH_ARMv6)
		return false;

	return !!(get_cr() & CR_XP);
}

/*
 * Apply the chosen permission field of every entry in a section_perm
 * table, one SECTION_SIZE step at a time.  Entries whose bounds are not
 * section-aligned are skipped with an error message.  Macro (not a
 * function) because "field" selects a struct member by name.
 */
#define set_section_perms(perms, field)	{				\
	size_t i;							\
	unsigned long addr;						\
									\
	if (!arch_has_strict_perms())					\
		return;							\
									\
	for (i = 0; i < ARRAY_SIZE(perms); i++) {			\
		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||	\
		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {		\
			pr_err("BUG: section %lx-%lx not aligned to %lx\n", \
				perms[i].start, perms[i].end,		\
				SECTION_SIZE);				\
			continue;					\
		}							\
									\
		for (addr = perms[i].start;				\
		     addr < perms[i].end;				\
		     addr += SECTION_SIZE)				\
			section_update(addr, perms[i].mask,		\
				       perms[i].field);			\
	}								\
}

/* Apply the NX permissions described by nx_perms. */
static inline void fix_kernmem_perms(void)
{
	set_section_perms(nx_perms, prot);
}
#else
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_ARM_KERNMEM_PERMS */

/* Poison and release the TCM link area, if TCM is configured. */
void free_tcmmem(void)
{
#ifdef CONFIG_HAVE_TCM
	extern char __tcm_start, __tcm_end;

	poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
	free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
#endif
}

void free_initmem(void)
{
	fix_kernmem_perms();
	free_tcmmem();
725

726
	poison_init_mem(__init_begin, __init_end - __init_begin);
N
Nicolas Pitre 已提交
727
	if (!machine_is_integrator() && !machine_is_cintegrator())
728
		free_initmem_default(-1);
L
Linus Torvalds 已提交
729 730 731 732 733 734 735 736
}

#ifdef CONFIG_BLK_DEV_INITRD

/* Non-zero when "keepinitrd" was given on the command line. */
static int keep_initrd;

/* Poison and free the initrd memory unless "keepinitrd" was requested. */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd) {
		poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
		free_reserved_area((void *)start, (void *)end, -1, "initrd");
	}
}

/* "keepinitrd" command line option: do not free the initrd memory. */
static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif