init.c
#include <linux/gfp.h>
#include <linux/initrd.h>
#include <linux/ioport.h>
#include <linux/swap.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>	/* for max_low_pfn */

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/init.h>
#include <asm/page.h>
#include <asm/page_types.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/proto.h>
#include <asm/dma.h>		/* for MAX_DMA_PFN */
#include <asm/microcode.h>

#include "mm_internal.h"

static unsigned long __initdata pgt_buf_start;
static unsigned long __initdata pgt_buf_end;
static unsigned long __initdata pgt_buf_top;

static unsigned long min_pfn_mapped;

static bool __initdata can_use_brk_pgt = true;

/*
 * Pages returned are already directly mapped.
 *
 * Changing that is likely to break Xen, see commit:
 *
 *    279b706 x86,xen: introduce x86_init.mapping.pagetable_reserve
 *
 * for detailed information.
 */
__ref void *alloc_low_pages(unsigned int num)
{
	unsigned long pfn;
	int i;

	if (after_bootmem) {
		unsigned int order;

		order = get_order((unsigned long)num << PAGE_SHIFT);
		return (void *)__get_free_pages(GFP_ATOMIC | __GFP_NOTRACK |
						__GFP_ZERO, order);
	}

	if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) {
		unsigned long ret;
		if (min_pfn_mapped >= max_pfn_mapped)
			panic("alloc_low_pages: ran out of memory");
		ret = memblock_find_in_range(min_pfn_mapped << PAGE_SHIFT,
					max_pfn_mapped << PAGE_SHIFT,
					PAGE_SIZE * num, PAGE_SIZE);
		if (!ret)
			panic("alloc_low_pages: can not alloc memory");
		memblock_reserve(ret, PAGE_SIZE * num);
		pfn = ret >> PAGE_SHIFT;
	} else {
		pfn = pgt_buf_end;
		pgt_buf_end += num;
		printk(KERN_DEBUG "BRK [%#010lx, %#010lx] PGTABLE\n",
			pfn << PAGE_SHIFT, (pgt_buf_end << PAGE_SHIFT) - 1);
	}

	for (i = 0; i < num; i++) {
		void *adr;

		adr = __va((pfn + i) << PAGE_SHIFT);
		clear_page(adr);
	}

	return __va(pfn << PAGE_SHIFT);
}

/* need 3 4k pages for the initial PMD_SIZE mapping, 3 for 0-ISA_END_ADDRESS */
#define INIT_PGT_BUF_SIZE	(6 * PAGE_SIZE)
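/*
 * Reserve space in the kernel's brk area for the initial page-table pages
 * used by early_alloc_pgt_buf(); alloc_low_pages() hands these out until the
 * buffer is exhausted, after which it falls back to memblock.
 */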
RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE);
void  __init early_alloc_pgt_buf(void)
{
	unsigned long tables = INIT_PGT_BUF_SIZE;
	phys_addr_t base;

	base = __pa(extend_brk(tables, PAGE_SIZE));

	pgt_buf_start = base >> PAGE_SHIFT;
	pgt_buf_end = pgt_buf_start;
	pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
}

int after_bootmem;

int direct_gbpages
#ifdef CONFIG_DIRECT_GBPAGES
				= 1
#endif
;

static void __init init_gbpages(void)
{
#ifdef CONFIG_X86_64
	if (direct_gbpages && cpu_has_gbpages)
		printk(KERN_INFO "Using GB pages for direct mapping\n");
	else
		direct_gbpages = 0;
#endif
}

struct map_range {
	unsigned long start;
	unsigned long end;
	unsigned page_size_mask;
};

static int page_size_mask;

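/*
 * Decide which page sizes (4k/2M/1G) the direct mapping may use and enable
 * the matching CPU features (PSE, PGE) when they are available.
 */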
static void __init probe_page_size_mask(void)
{
	init_gbpages();

#if !defined(CONFIG_DEBUG_PAGEALLOC) && !defined(CONFIG_KMEMCHECK)
	/*
	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
	 * This will simplify cpa(), which otherwise needs to support splitting
	 * large pages into small in interrupt context, etc.
	 */
	if (direct_gbpages)
		page_size_mask |= 1 << PG_LEVEL_1G;
	if (cpu_has_pse)
		page_size_mask |= 1 << PG_LEVEL_2M;
#endif

	/* Enable PSE if available */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__supported_pte_mask |= _PAGE_GLOBAL;
	}
}

#ifdef CONFIG_X86_32
#define NR_RANGE_MR 3
#else /* CONFIG_X86_64 */
#define NR_RANGE_MR 5
#endif

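/*
 * Record the pfn range [start_pfn, end_pfn) and its page_size_mask in mr[],
 * returning the updated number of entries.
 */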
static int __meminit save_mr(struct map_range *mr, int nr_range,
			     unsigned long start_pfn, unsigned long end_pfn,
			     unsigned long page_size_mask)
{
	if (start_pfn < end_pfn) {
		if (nr_range >= NR_RANGE_MR)
			panic("run out of range for init_memory_mapping\n");
		mr[nr_range].start = start_pfn<<PAGE_SHIFT;
		mr[nr_range].end   = end_pfn<<PAGE_SHIFT;
		mr[nr_range].page_size_mask = page_size_mask;
		nr_range++;
	}

	return nr_range;
}

/*
 * Adjust the page_size_mask for small ranges so that they use big pages
 * instead of small ones when the surrounding memory is RAM as well.
 */
static void __init_refok adjust_range_page_size_mask(struct map_range *mr,
							 int nr_range)
{
	int i;

	for (i = 0; i < nr_range; i++) {
		if ((page_size_mask & (1<<PG_LEVEL_2M)) &&
		    !(mr[i].page_size_mask & (1<<PG_LEVEL_2M))) {
			unsigned long start = round_down(mr[i].start, PMD_SIZE);
			unsigned long end = round_up(mr[i].end, PMD_SIZE);

#ifdef CONFIG_X86_32
			if ((end >> PAGE_SHIFT) > max_low_pfn)
				continue;
#endif

			if (memblock_is_region_memory(start, end - start))
				mr[i].page_size_mask |= 1<<PG_LEVEL_2M;
		}
		if ((page_size_mask & (1<<PG_LEVEL_1G)) &&
		    !(mr[i].page_size_mask & (1<<PG_LEVEL_1G))) {
			unsigned long start = round_down(mr[i].start, PUD_SIZE);
			unsigned long end = round_up(mr[i].end, PUD_SIZE);

			if (memblock_is_region_memory(start, end - start))
				mr[i].page_size_mask |= 1<<PG_LEVEL_1G;
		}
	}
}

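/*
 * Split [start, end) into ranges whose boundaries honour 2M/1G alignment, so
 * that each piece can be mapped with the largest page size allowed by
 * page_size_mask.
 */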
static int __meminit split_mem_range(struct map_range *mr, int nr_range,
				     unsigned long start,
				     unsigned long end)
{
	unsigned long start_pfn, end_pfn, limit_pfn;
	unsigned long pfn;
	int i;

	limit_pfn = PFN_DOWN(end);

	/* head if not big page aligned? */
	pfn = start_pfn = PFN_DOWN(start);
#ifdef CONFIG_X86_32
	/*
	 * Don't use a large page for the first 2/4MB of memory
	 * because there are often fixed size MTRRs in there
	 * and overlapping MTRRs into large pages can cause
	 * slowdowns.
	 */
	if (pfn == 0)
		end_pfn = PFN_DOWN(PMD_SIZE);
	else
		end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#else /* CONFIG_X86_64 */
	end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#endif
	if (end_pfn > limit_pfn)
		end_pfn = limit_pfn;
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
		pfn = end_pfn;
	}

	/* big page (2M) range */
	start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#ifdef CONFIG_X86_32
	end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
#else /* CONFIG_X86_64 */
	end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
	if (end_pfn > round_down(limit_pfn, PFN_DOWN(PMD_SIZE)))
		end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
#endif

	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1<<PG_LEVEL_2M));
		pfn = end_pfn;
	}

#ifdef CONFIG_X86_64
	/* big page (1G) range */
	start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
	end_pfn = round_down(limit_pfn, PFN_DOWN(PUD_SIZE));
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask &
				 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
		pfn = end_pfn;
	}

	/* tail is not big page (1G) aligned */
	start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
	end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1<<PG_LEVEL_2M));
		pfn = end_pfn;
	}
#endif

	/* tail is not big page (2M) aligned */
	start_pfn = pfn;
	end_pfn = limit_pfn;
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

	if (!after_bootmem)
		adjust_range_page_size_mask(mr, nr_range);

	/* try to merge contiguous ranges that use the same page size */
	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
		unsigned long old_start;
		if (mr[i].end != mr[i+1].start ||
		    mr[i].page_size_mask != mr[i+1].page_size_mask)
			continue;
		/* move it */
		old_start = mr[i].start;
		memmove(&mr[i], &mr[i+1],
			(nr_range - 1 - i) * sizeof(struct map_range));
		mr[i--].start = old_start;
		nr_range--;
	}

	for (i = 0; i < nr_range; i++)
		printk(KERN_DEBUG " [mem %#010lx-%#010lx] page %s\n",
				mr[i].start, mr[i].end - 1,
			(mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
			 (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));

	return nr_range;
}

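/*
 * Ranges of pfns that have already been direct-mapped; add_pfn_range_mapped()
 * records new ranges and pfn_range_is_mapped() checks whether a given range
 * is fully covered.
 */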
struct range pfn_mapped[E820_X_MAX];
int nr_pfn_mapped;

static void add_pfn_range_mapped(unsigned long start_pfn, unsigned long end_pfn)
{
	nr_pfn_mapped = add_range_with_merge(pfn_mapped, E820_X_MAX,
					     nr_pfn_mapped, start_pfn, end_pfn);
	nr_pfn_mapped = clean_sort_range(pfn_mapped, E820_X_MAX);

	max_pfn_mapped = max(max_pfn_mapped, end_pfn);

	if (start_pfn < (1UL<<(32-PAGE_SHIFT)))
		max_low_pfn_mapped = max(max_low_pfn_mapped,
					 min(end_pfn, 1UL<<(32-PAGE_SHIFT)));
}

bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn)
{
	int i;

	for (i = 0; i < nr_pfn_mapped; i++)
		if ((start_pfn >= pfn_mapped[i].start) &&
		    (end_pfn <= pfn_mapped[i].end))
			return true;

	return false;
}

/*
 * Set up the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * the physical memory. To access them they are temporarily mapped.
 */
unsigned long __init_refok init_memory_mapping(unsigned long start,
					       unsigned long end)
{
	struct map_range mr[NR_RANGE_MR];
	unsigned long ret = 0;
	int nr_range, i;

	pr_info("init_memory_mapping: [mem %#010lx-%#010lx]\n",
	       start, end - 1);

	memset(mr, 0, sizeof(mr));
	nr_range = split_mem_range(mr, 0, start, end);

	for (i = 0; i < nr_range; i++)
		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
						   mr[i].page_size_mask);

	add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT);

	return ret >> PAGE_SHIFT;
}

/*
 * We need to iterate through the E820 memory map and create direct mappings
 * for only E820_RAM and E820_KERN_RESERVED regions. We cannot simply
 * create direct mappings for all pfns from [0 to max_low_pfn) and
 * [4GB to max_pfn) because of possible memory holes in high addresses
 * that cannot be marked as UC by fixed/variable range MTRRs.
 * Depending on the alignment of E820 ranges, this may result in smaller
 * page sizes (i.e. 4K instead of 2M or 1G) being used for parts of the
 * direct mapping.
 *
 * init_mem_mapping() calls init_range_memory_mapping() with a big range.
 * That range can have holes in the middle or at the ends, and only the
 * RAM parts will be mapped by init_range_memory_mapping().
 */
static unsigned long __init init_range_memory_mapping(
					   unsigned long r_start,
					   unsigned long r_end)
{
	unsigned long start_pfn, end_pfn;
	unsigned long mapped_ram_size = 0;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
		u64 start = clamp_val(PFN_PHYS(start_pfn), r_start, r_end);
		u64 end = clamp_val(PFN_PHYS(end_pfn), r_start, r_end);
		if (start >= end)
			continue;

		/*
		 * If this range overlaps the brk page-table buffer, we need
		 * to allocate the pgt buffer from memblock instead.
		 */
		can_use_brk_pgt = max(start, (u64)pgt_buf_end<<PAGE_SHIFT) >=
				    min(end, (u64)pgt_buf_top<<PAGE_SHIFT);
		init_memory_mapping(start, end);
		mapped_ram_size += end - start;
		can_use_brk_pgt = true;
	}

	return mapped_ram_size;
}

static unsigned long __init get_new_step_size(unsigned long step_size)
{
	/*
	 * Why we shift by 5, and why we don't have to worry about
	 * 'step_size << 5' overflowing:
	 *
	 * The initial mapped size is PMD_SIZE (2M).
	 * We cannot set step_size to PUD_SIZE (1G) yet.
	 * In the worst case, when we cross the 1G boundary and
	 * PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k)
	 * to map a 1G range with PTEs. Use 5 as the shift for now.
	 *
	 * There is no need to worry about overflow: on 32-bit, when step_size
	 * is 0, round_down() returns 0 for start, and that turns it
	 * into 0x100000000ULL.
	 */
	return step_size << 5;
}

/**
 * memory_map_top_down - Map [map_start, map_end) top down
 * @map_start: start address of the target memory range
 * @map_end: end address of the target memory range
 *
 * This function will set up the direct mapping for the memory range
 * [map_start, map_end) in top-down fashion. That means the page tables
 * will be allocated at the end of the memory, and we map the
 * memory top-down.
 */
static void __init memory_map_top_down(unsigned long map_start,
				       unsigned long map_end)
{
	unsigned long real_end, start, last_start;
	unsigned long step_size;
	unsigned long addr;
	unsigned long mapped_ram_size = 0;
	unsigned long new_mapped_ram_size;

	/* Xen has a big range reserved near the end of RAM; skip it at first. */
	addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
	real_end = addr + PMD_SIZE;

	/* step_size needs to be small so that the pgt_buf from BRK can cover it */
	step_size = PMD_SIZE;
	max_pfn_mapped = 0; /* will get exact value next */
	min_pfn_mapped = real_end >> PAGE_SHIFT;
	last_start = start = real_end;

	/*
	 * We start from the top (end of memory) and go to the bottom.
	 * The memblock_find_in_range() gets us a block of RAM from the
	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
	 * for page table.
	 */
	while (last_start > map_start) {
		if (last_start > step_size) {
			start = round_down(last_start - 1, step_size);
			if (start < map_start)
				start = map_start;
		} else
			start = map_start;
		new_mapped_ram_size = init_range_memory_mapping(start,
							last_start);
		last_start = start;
		min_pfn_mapped = last_start >> PAGE_SHIFT;
		/* only increase step_size after big range get mapped */
		if (new_mapped_ram_size > mapped_ram_size)
			step_size = get_new_step_size(step_size);
		mapped_ram_size += new_mapped_ram_size;
	}

	if (real_end < map_end)
		init_range_memory_mapping(real_end, map_end);
}

/**
 * memory_map_bottom_up - Map [map_start, map_end) bottom up
 * @map_start: start address of the target memory range
 * @map_end: end address of the target memory range
 *
 * This function will set up the direct mapping for the memory range
 * [map_start, map_end) in bottom-up fashion. Since we have limited the
 * bottom-up allocation above the kernel, the page tables will
 * be allocated just above the kernel and we map the memory
 * bottom-up.
 */
static void __init memory_map_bottom_up(unsigned long map_start,
					unsigned long map_end)
{
	unsigned long next, new_mapped_ram_size, start;
	unsigned long mapped_ram_size = 0;
	/* step_size needs to be small so that the pgt_buf from BRK can cover it */
	unsigned long step_size = PMD_SIZE;

	start = map_start;
	min_pfn_mapped = start >> PAGE_SHIFT;

	/*
	 * We start from the bottom (@map_start) and go to the top (@map_end).
	 * The memblock_find_in_range() gets us a block of RAM from the
	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
	 * for page table.
	 */
	while (start < map_end) {
		if (map_end - start > step_size) {
			next = round_up(start + 1, step_size);
			if (next > map_end)
				next = map_end;
		} else
			next = map_end;

		new_mapped_ram_size = init_range_memory_mapping(start, next);
		start = next;

		if (new_mapped_ram_size > mapped_ram_size)
			step_size = get_new_step_size(step_size);
		mapped_ram_size += new_mapped_ram_size;
	}
}

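/*
 * Set up the direct mapping of all usable physical memory (beyond the
 * always-mapped ISA range), then switch to swapper_pg_dir and flush the TLB.
 */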
void __init init_mem_mapping(void)
{
	unsigned long end;

	probe_page_size_mask();

#ifdef CONFIG_X86_64
	end = max_pfn << PAGE_SHIFT;
#else
	end = max_low_pfn << PAGE_SHIFT;
#endif

	/* the ISA range is always mapped regardless of memory holes */
	init_memory_mapping(0, ISA_END_ADDRESS);

	/*
	 * If the allocation is in bottom-up direction, we set up the direct
	 * mapping bottom-up, otherwise we set it up top-down.
	 */
	if (memblock_bottom_up()) {
		unsigned long kernel_end = __pa_symbol(_end);

		/*
		 * We need two separate calls here. This is because we want to
		 * allocate page tables above the kernel. So we first map
		 * [kernel_end, end) so that memory above the kernel is mapped
		 * as soon as possible, and then use the page tables allocated
		 * above the kernel to map [ISA_END_ADDRESS, kernel_end).
		 */
		memory_map_bottom_up(kernel_end, end);
		memory_map_bottom_up(ISA_END_ADDRESS, kernel_end);
	} else {
		memory_map_top_down(ISA_END_ADDRESS, end);
	}

#ifdef CONFIG_X86_64
	if (max_pfn > max_low_pfn) {
		/* can we preserve max_low_pfn? */
		max_low_pfn = max_pfn;
	}
#else
	early_ioremap_page_table_range_init();
#endif

	load_cr3(swapper_pg_dir);
	__flush_tlb_all();

	early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
}

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area contains BIOS code and data regions used by X and dosemu and similar
 * apps. Access has to be given to non-kernel-RAM areas as well; these contain
 * the PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	if (pagenr < 256)
		return 1;
	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pagenr))
		return 1;
	return 0;
}

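/*
 * Free the page-aligned region [begin, end) back to the page allocator (or
 * merely unmap it under CONFIG_DEBUG_PAGEALLOC so stray accesses fault).
 */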
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long begin_aligned, end_aligned;

	/* Make sure boundaries are page aligned */
	begin_aligned = PAGE_ALIGN(begin);
	end_aligned   = end & PAGE_MASK;

	if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
		begin = begin_aligned;
		end   = end_aligned;
	}

	if (begin >= end)
		return;

	/*
	 * If debugging page accesses then do not free this memory but
	 * mark them not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk(KERN_INFO "debug: unmapping init [mem %#010lx-%#010lx]\n",
		begin, end - 1);
	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
	/*
	 * We just marked the kernel text read only above, now that
	 * we are going to free part of that, we need to make that
	 * writeable and non-executable first.
	 */
	set_memory_nx(begin, (end - begin) >> PAGE_SHIFT);
	set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

	free_reserved_area((void *)begin, (void *)end, POISON_FREE_INITMEM, what);
#endif
}

void free_initmem(void)
{
	free_init_pages("unused kernel",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
#ifdef CONFIG_MICROCODE_EARLY
	/*
	 * Remember, initrd memory may contain microcode or other useful things.
	 * Before we lose initrd mem, we need to find a place to hold them
	 * now that normal virtual memory is enabled.
	 */
	save_microcode_in_initrd();
#endif

	/*
	 * end may be unaligned, and we cannot align it here: the
	 * decompressor could be confused by an aligned initrd_end.
	 * The trailing partial page has already been reserved in
	 *   - i386_start_kernel()
	 *   - x86_64_start_kernel()
	 *   - relocate_initrd()
	 * so it is safe to PAGE_ALIGN() the end here and free the partial page.
	 */
	free_init_pages("initrd", start, PAGE_ALIGN(end));
}
#endif
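
/*
 * Populate the maximum pfn for each configured zone (DMA, DMA32, NORMAL,
 * HIGHMEM) and hand the array to free_area_init_nodes() to build the zones.
 */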
void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA]		= MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32]	= MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL]	= max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM]	= max_pfn;
#endif

	free_area_init_nodes(max_zone_pfns);
}