#include <linux/gfp.h>
#include <linux/initrd.h>
#include <linux/ioport.h>
#include <linux/swap.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>	/* for max_low_pfn */

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/init.h>
#include <asm/page.h>
#include <asm/page_types.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/proto.h>
#include <asm/dma.h>		/* for MAX_DMA_PFN */
#include <asm/microcode.h>
#include <asm/kaslr.h>

/*
 * We need to define the tracepoints somewhere, and tlb.c
 * is only compiled when SMP=y.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/tlb.h>

#include "mm_internal.h"

/*
 * Tables translating between page_cache_type_t and pte encoding.
 *
 * The default values are defined statically as minimal supported mode;
 * WC and WT fall back to UC-.  pat_init() updates these values to support
 * more cache modes, WC and WT, when it is safe to do so.  See pat_init()
 * for the details.  Note, __early_ioremap() used during early boot-time
 * takes pgprot_t (pte encoding) and does not use these tables.
 *
 *   Index into __cachemode2pte_tbl[] is the cachemode.
 *
 *   Index into __pte2cachemode_tbl[] are the caching attribute bits of the pte
 *   (_PAGE_PWT, _PAGE_PCD, _PAGE_PAT) at index bit positions 0, 1, 2.
 */
uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = {
	[_PAGE_CACHE_MODE_WB      ]	= 0         | 0        ,
	[_PAGE_CACHE_MODE_WC      ]	= 0         | _PAGE_PCD,
	[_PAGE_CACHE_MODE_UC_MINUS]	= 0         | _PAGE_PCD,
	[_PAGE_CACHE_MODE_UC      ]	= _PAGE_PWT | _PAGE_PCD,
	[_PAGE_CACHE_MODE_WT      ]	= 0         | _PAGE_PCD,
	[_PAGE_CACHE_MODE_WP      ]	= 0         | _PAGE_PCD,
};
EXPORT_SYMBOL(__cachemode2pte_tbl);

uint8_t __pte2cachemode_tbl[8] = {
	[__pte2cm_idx( 0        | 0         | 0        )] = _PAGE_CACHE_MODE_WB,
	[__pte2cm_idx(_PAGE_PWT | 0         | 0        )] = _PAGE_CACHE_MODE_UC_MINUS,
	[__pte2cm_idx( 0        | _PAGE_PCD | 0        )] = _PAGE_CACHE_MODE_UC_MINUS,
	[__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | 0        )] = _PAGE_CACHE_MODE_UC,
	[__pte2cm_idx( 0        | 0         | _PAGE_PAT)] = _PAGE_CACHE_MODE_WB,
	[__pte2cm_idx(_PAGE_PWT | 0         | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
	[__pte2cm_idx(0         | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
	[__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC,
};
EXPORT_SYMBOL(__pte2cachemode_tbl);

static unsigned long __initdata pgt_buf_start;
static unsigned long __initdata pgt_buf_end;
static unsigned long __initdata pgt_buf_top;

static unsigned long min_pfn_mapped;

static bool __initdata can_use_brk_pgt = true;

/*
 * Pages returned are already directly mapped.
 *
 * Changing that is likely to break Xen, see commit:
 *
 *    279b706 x86,xen: introduce x86_init.mapping.pagetable_reserve
 *
 * for detailed information.
 */
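/*
 * Allocate @num zeroed pages for early page tables.  After bootmem is up
 * the pages come from the normal page allocator; before that they come
 * from the BRK page-table buffer, falling back to memblock within the
 * already-mapped [min_pfn_mapped, max_pfn_mapped) range.
 */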
__ref void *alloc_low_pages(unsigned int num)
{
	unsigned long pfn;
	int i;

	if (after_bootmem) {
		unsigned int order;

		order = get_order((unsigned long)num << PAGE_SHIFT);
		return (void *)__get_free_pages(GFP_ATOMIC | __GFP_NOTRACK |
						__GFP_ZERO, order);
	}

	if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) {
		unsigned long ret;
		if (min_pfn_mapped >= max_pfn_mapped)
			panic("alloc_low_pages: ran out of memory");
		ret = memblock_find_in_range(min_pfn_mapped << PAGE_SHIFT,
					max_pfn_mapped << PAGE_SHIFT,
					PAGE_SIZE * num , PAGE_SIZE);
		if (!ret)
			panic("alloc_low_pages: can not alloc memory");
		memblock_reserve(ret, PAGE_SIZE * num);
		pfn = ret >> PAGE_SHIFT;
	} else {
		pfn = pgt_buf_end;
		pgt_buf_end += num;
		printk(KERN_DEBUG "BRK [%#010lx, %#010lx] PGTABLE\n",
			pfn << PAGE_SHIFT, (pgt_buf_end << PAGE_SHIFT) - 1);
	}

	for (i = 0; i < num; i++) {
		void *adr;

		adr = __va((pfn + i) << PAGE_SHIFT);
		clear_page(adr);
	}

	return __va(pfn << PAGE_SHIFT);
}

/* need 3 4k pages for initial PMD_SIZE, 3 4k pages for 0-ISA_END_ADDRESS */
#define INIT_PGT_BUF_SIZE	(6 * PAGE_SIZE)
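/*
 * Reserve INIT_PGT_BUF_SIZE bytes of BRK space as the initial page-table
 * buffer that alloc_low_pages() hands out before memblock-backed
 * allocations are possible.
 */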
RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE);
void  __init early_alloc_pgt_buf(void)
{
	unsigned long tables = INIT_PGT_BUF_SIZE;
	phys_addr_t base;

	base = __pa(extend_brk(tables, PAGE_SIZE));

	pgt_buf_start = base >> PAGE_SHIFT;
	pgt_buf_end = pgt_buf_start;
	pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
}

int after_bootmem;

early_param_on_off("gbpages", "nogbpages", direct_gbpages, CONFIG_X86_DIRECT_GBPAGES);

struct map_range {
	unsigned long start;
	unsigned long end;
	unsigned page_size_mask;
};

static int page_size_mask;

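/*
 * Decide which direct-mapping page sizes (2M/1G) may be used, based on
 * CPU features and debug options, and enable PSE/PGE in CR4 as needed.
 */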
static void __init probe_page_size_mask(void)
{
#if !defined(CONFIG_KMEMCHECK)
	/*
	 * For CONFIG_KMEMCHECK or pagealloc debugging, identity mapping will
	 * use small pages.
	 * This will simplify cpa(), which otherwise needs to support splitting
	 * large pages into small in interrupt context, etc.
	 */
	if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled())
		page_size_mask |= 1 << PG_LEVEL_2M;
#endif

	/* Enable PSE if available */
	if (boot_cpu_has(X86_FEATURE_PSE))
		cr4_set_bits_and_update_boot(X86_CR4_PSE);

	/* Enable PGE if available */
	if (boot_cpu_has(X86_FEATURE_PGE)) {
		cr4_set_bits_and_update_boot(X86_CR4_PGE);
		__supported_pte_mask |= _PAGE_GLOBAL;
	} else
		__supported_pte_mask &= ~_PAGE_GLOBAL;

	/* Enable 1 GB linear kernel mappings if available: */
	if (direct_gbpages && boot_cpu_has(X86_FEATURE_GBPAGES)) {
		printk(KERN_INFO "Using GB pages for direct mapping\n");
		page_size_mask |= 1 << PG_LEVEL_1G;
	} else {
		direct_gbpages = 0;
	}
}

#ifdef CONFIG_X86_32
#define NR_RANGE_MR 3
#else /* CONFIG_X86_64 */
#define NR_RANGE_MR 5
#endif

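/*
 * Record a [start_pfn, end_pfn) range and its page size mask in mr[],
 * provided the range is non-empty and a slot is still available.
 */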
static int __meminit save_mr(struct map_range *mr, int nr_range,
			     unsigned long start_pfn, unsigned long end_pfn,
			     unsigned long page_size_mask)
{
	if (start_pfn < end_pfn) {
		if (nr_range >= NR_RANGE_MR)
			panic("run out of range for init_memory_mapping\n");
		mr[nr_range].start = start_pfn<<PAGE_SHIFT;
		mr[nr_range].end   = end_pfn<<PAGE_SHIFT;
		mr[nr_range].page_size_mask = page_size_mask;
		nr_range++;
	}

	return nr_range;
}

/*
 * Adjust the page_size_mask so that a small range can still be mapped
 * with a big page size if the surrounding area is RAM as well.
 */
static void __ref adjust_range_page_size_mask(struct map_range *mr,
							 int nr_range)
{
	int i;

	for (i = 0; i < nr_range; i++) {
		if ((page_size_mask & (1<<PG_LEVEL_2M)) &&
		    !(mr[i].page_size_mask & (1<<PG_LEVEL_2M))) {
			unsigned long start = round_down(mr[i].start, PMD_SIZE);
			unsigned long end = round_up(mr[i].end, PMD_SIZE);

#ifdef CONFIG_X86_32
			if ((end >> PAGE_SHIFT) > max_low_pfn)
				continue;
#endif

			if (memblock_is_region_memory(start, end - start))
				mr[i].page_size_mask |= 1<<PG_LEVEL_2M;
		}
		if ((page_size_mask & (1<<PG_LEVEL_1G)) &&
		    !(mr[i].page_size_mask & (1<<PG_LEVEL_1G))) {
			unsigned long start = round_down(mr[i].start, PUD_SIZE);
			unsigned long end = round_up(mr[i].end, PUD_SIZE);

			if (memblock_is_region_memory(start, end - start))
				mr[i].page_size_mask |= 1<<PG_LEVEL_1G;
		}
	}
}

static const char *page_size_string(struct map_range *mr)
{
	static const char str_1g[] = "1G";
	static const char str_2m[] = "2M";
	static const char str_4m[] = "4M";
	static const char str_4k[] = "4k";

	if (mr->page_size_mask & (1<<PG_LEVEL_1G))
		return str_1g;
	/*
	 * 32-bit without PAE has a 4M large page size.
	 * PG_LEVEL_2M is misnamed, but we can at least
	 * print out the right size in the string.
	 */
	if (IS_ENABLED(CONFIG_X86_32) &&
	    !IS_ENABLED(CONFIG_X86_PAE) &&
	    mr->page_size_mask & (1<<PG_LEVEL_2M))
		return str_4m;

	if (mr->page_size_mask & (1<<PG_LEVEL_2M))
		return str_2m;

	return str_4k;
}

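/*
 * Split [start, end) into ranges by mappable page size: an unaligned
 * head, a 2M-aligned body (plus a 1G-aligned middle on 64-bit), and an
 * unaligned tail, then merge adjacent ranges that use the same size.
 */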
static int __meminit split_mem_range(struct map_range *mr, int nr_range,
				     unsigned long start,
				     unsigned long end)
{
	unsigned long start_pfn, end_pfn, limit_pfn;
	unsigned long pfn;
	int i;

	limit_pfn = PFN_DOWN(end);

	/* head that is not big page aligned */
	pfn = start_pfn = PFN_DOWN(start);
#ifdef CONFIG_X86_32
	/*
	 * Don't use a large page for the first 2/4MB of memory
	 * because there are often fixed size MTRRs in there
	 * and overlapping MTRRs into large pages can cause
	 * slowdowns.
	 */
	if (pfn == 0)
		end_pfn = PFN_DOWN(PMD_SIZE);
	else
		end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#else /* CONFIG_X86_64 */
	end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#endif
	if (end_pfn > limit_pfn)
		end_pfn = limit_pfn;
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
		pfn = end_pfn;
	}

	/* big page (2M) range */
	start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#ifdef CONFIG_X86_32
	end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
#else /* CONFIG_X86_64 */
	end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
	if (end_pfn > round_down(limit_pfn, PFN_DOWN(PMD_SIZE)))
		end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
#endif

	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1<<PG_LEVEL_2M));
		pfn = end_pfn;
	}

#ifdef CONFIG_X86_64
	/* big page (1G) range */
	start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
	end_pfn = round_down(limit_pfn, PFN_DOWN(PUD_SIZE));
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask &
				 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
		pfn = end_pfn;
	}

	/* tail that is not big page (1G) aligned */
	start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
	end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1<<PG_LEVEL_2M));
		pfn = end_pfn;
	}
#endif

	/* tail that is not big page (2M) aligned */
	start_pfn = pfn;
	end_pfn = limit_pfn;
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

	if (!after_bootmem)
		adjust_range_page_size_mask(mr, nr_range);

	/* try to merge contiguous ranges with the same page size */
	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
		unsigned long old_start;
		if (mr[i].end != mr[i+1].start ||
		    mr[i].page_size_mask != mr[i+1].page_size_mask)
			continue;
		/* move it */
		old_start = mr[i].start;
		memmove(&mr[i], &mr[i+1],
			(nr_range - 1 - i) * sizeof(struct map_range));
		mr[i--].start = old_start;
		nr_range--;
	}

	for (i = 0; i < nr_range; i++)
		pr_debug(" [mem %#010lx-%#010lx] page %s\n",
				mr[i].start, mr[i].end - 1,
				page_size_string(&mr[i]));

	return nr_range;
}

struct range pfn_mapped[E820_X_MAX];
int nr_pfn_mapped;

static void add_pfn_range_mapped(unsigned long start_pfn, unsigned long end_pfn)
{
	nr_pfn_mapped = add_range_with_merge(pfn_mapped, E820_X_MAX,
					     nr_pfn_mapped, start_pfn, end_pfn);
	nr_pfn_mapped = clean_sort_range(pfn_mapped, E820_X_MAX);

	max_pfn_mapped = max(max_pfn_mapped, end_pfn);

	if (start_pfn < (1UL<<(32-PAGE_SHIFT)))
		max_low_pfn_mapped = max(max_low_pfn_mapped,
					 min(end_pfn, 1UL<<(32-PAGE_SHIFT)));
}

bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn)
{
	int i;

	for (i = 0; i < nr_pfn_mapped; i++)
		if ((start_pfn >= pfn_mapped[i].start) &&
		    (end_pfn <= pfn_mapped[i].end))
			return true;

	return false;
}

/*
 * Set up the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * the physical memory. To access them they are temporarily mapped.
 */
unsigned long __ref init_memory_mapping(unsigned long start,
					       unsigned long end)
{
	struct map_range mr[NR_RANGE_MR];
	unsigned long ret = 0;
	int nr_range, i;

	pr_debug("init_memory_mapping: [mem %#010lx-%#010lx]\n",
	       start, end - 1);

	memset(mr, 0, sizeof(mr));
	nr_range = split_mem_range(mr, 0, start, end);

	for (i = 0; i < nr_range; i++)
		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
						   mr[i].page_size_mask);

	add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT);

	return ret >> PAGE_SHIFT;
}

/*
 * We need to iterate through the E820 memory map and create direct mappings
 * for only E820_RAM and E820_KERN_RESERVED regions. We cannot simply
 * create direct mappings for all pfns from [0 to max_low_pfn) and
 * [4GB to max_pfn) because of possible memory holes in high addresses
 * that cannot be marked as UC by fixed/variable range MTRRs.
 * Depending on the alignment of E820 ranges, this may possibly result
 * in using smaller size (i.e. 4K instead of 2M or 1G) page tables.
 *
 * init_mem_mapping() calls init_range_memory_mapping() with a big range.
 * That range may have holes in the middle or at the ends, and only the
 * RAM parts will be mapped by init_range_memory_mapping().
 */
static unsigned long __init init_range_memory_mapping(
					   unsigned long r_start,
					   unsigned long r_end)
{
	unsigned long start_pfn, end_pfn;
	unsigned long mapped_ram_size = 0;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
		u64 start = clamp_val(PFN_PHYS(start_pfn), r_start, r_end);
		u64 end = clamp_val(PFN_PHYS(end_pfn), r_start, r_end);
		if (start >= end)
			continue;

		/*
		 * If this range overlaps the brk page-table buffer, we need
		 * to allocate the pgt buf from memblock instead.
		 */
		can_use_brk_pgt = max(start, (u64)pgt_buf_end<<PAGE_SHIFT) >=
				    min(end, (u64)pgt_buf_top<<PAGE_SHIFT);
		init_memory_mapping(start, end);
		mapped_ram_size += end - start;
		can_use_brk_pgt = true;
	}

	return mapped_ram_size;
}

static unsigned long __init get_new_step_size(unsigned long step_size)
{
	/*
	 * Initial mapped size is PMD_SIZE (2M).
	 * We cannot set step_size to be PUD_SIZE (1G) yet.
	 * In the worst case, when we cross the 1G boundary and
	 * PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k)
	 * to map a 1G range with PTEs. Hence we use one less than the
	 * difference of page table level shifts.
	 *
	 * Don't need to worry about overflow in the top-down case, on 32bit,
	 * when step_size is 0, round_down() returns 0 for start, and that
	 * turns it into 0x100000000ULL.
	 * In the bottom-up case, round_up(x, 0) returns 0 though too, which
	 * needs to be taken into consideration by the code below.
	 */
	return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
}

/**
 * memory_map_top_down - Map [map_start, map_end) top down
 * @map_start: start address of the target memory range
 * @map_end: end address of the target memory range
 *
 * This function will set up the direct mapping for the memory range
 * [map_start, map_end) in top-down order. That is, the page tables
 * will be allocated at the end of the memory, and we map the
 * memory from the top down.
 */
static void __init memory_map_top_down(unsigned long map_start,
				       unsigned long map_end)
{
	unsigned long real_end, start, last_start;
	unsigned long step_size;
	unsigned long addr;
	unsigned long mapped_ram_size = 0;

	/* Xen has a big reserved range near the end of RAM; skip it at first. */
	addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
	real_end = addr + PMD_SIZE;

	/* step_size needs to be small so the pgt_buf from BRK can cover it */
	step_size = PMD_SIZE;
	max_pfn_mapped = 0; /* will get exact value next */
	min_pfn_mapped = real_end >> PAGE_SHIFT;
	last_start = start = real_end;

	/*
	 * We start from the top (end of memory) and go to the bottom.
	 * memblock_find_in_range() gets us a block of RAM from the
	 * end of RAM in [min_pfn_mapped, max_pfn_mapped), which is used
	 * as new pages for the page tables.
	 */
	while (last_start > map_start) {
		if (last_start > step_size) {
			start = round_down(last_start - 1, step_size);
			if (start < map_start)
				start = map_start;
		} else
			start = map_start;
		mapped_ram_size += init_range_memory_mapping(start,
							last_start);
		last_start = start;
		min_pfn_mapped = last_start >> PAGE_SHIFT;
		if (mapped_ram_size >= step_size)
			step_size = get_new_step_size(step_size);
	}

	if (real_end < map_end)
		init_range_memory_mapping(real_end, map_end);
}

/**
 * memory_map_bottom_up - Map [map_start, map_end) bottom up
 * @map_start: start address of the target memory range
 * @map_end: end address of the target memory range
 *
 * This function will set up the direct mapping for the memory range
 * [map_start, map_end) in bottom-up order. Since we have limited the
 * bottom-up allocation above the kernel, the page tables will
 * be allocated just above the kernel and we map the memory
 * in [map_start, map_end) from the bottom up.
 */
static void __init memory_map_bottom_up(unsigned long map_start,
					unsigned long map_end)
{
	unsigned long next, start;
	unsigned long mapped_ram_size = 0;
	/* step_size needs to be small so the pgt_buf from BRK can cover it */
	unsigned long step_size = PMD_SIZE;

	start = map_start;
	min_pfn_mapped = start >> PAGE_SHIFT;

	/*
	 * We start from the bottom (@map_start) and go to the top (@map_end).
	 * memblock_find_in_range() gets us a block of RAM from the
	 * end of RAM in [min_pfn_mapped, max_pfn_mapped), which is used
	 * as new pages for the page tables.
	 */
	while (start < map_end) {
		if (step_size && map_end - start > step_size) {
			next = round_up(start + 1, step_size);
			if (next > map_end)
				next = map_end;
		} else {
			next = map_end;
		}

		mapped_ram_size += init_range_memory_mapping(start, next);
		start = next;

		if (mapped_ram_size >= step_size)
			step_size = get_new_step_size(step_size);
	}
}

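/*
 * Set up the direct mapping for all usable physical memory, choosing
 * bottom-up or top-down page-table allocation to match memblock's
 * current allocation direction.
 */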
void __init init_mem_mapping(void)
{
	unsigned long end;

	probe_page_size_mask();

#ifdef CONFIG_X86_64
	end = max_pfn << PAGE_SHIFT;
#else
	end = max_low_pfn << PAGE_SHIFT;
#endif

	/* the ISA range is always mapped regardless of memory holes */
	init_memory_mapping(0, ISA_END_ADDRESS);

	/* Init the trampoline, possibly with KASLR memory offset */
	init_trampoline();

	/*
	 * If the allocation is in bottom-up direction, we setup direct mapping
	 * in bottom-up, otherwise we setup direct mapping in top-down.
	 */
	if (memblock_bottom_up()) {
		unsigned long kernel_end = __pa_symbol(_end);

		/*
		 * we need two separate calls here. This is because we want to
		 * allocate page tables above the kernel. So we first map
		 * [kernel_end, end) to make memory above the kernel be mapped
		 * as soon as possible. And then use page tables allocated above
		 * the kernel to map [ISA_END_ADDRESS, kernel_end).
		 */
		memory_map_bottom_up(kernel_end, end);
		memory_map_bottom_up(ISA_END_ADDRESS, kernel_end);
	} else {
		memory_map_top_down(ISA_END_ADDRESS, end);
	}

#ifdef CONFIG_X86_64
	if (max_pfn > max_low_pfn) {
		/* can we preserve max_low_pfn ? */
		max_low_pfn = max_pfn;
	}
#else
	early_ioremap_page_table_range_init();
#endif

	load_cr3(swapper_pg_dir);
	__flush_tlb_all();

	early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
}

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area contains BIOS code and data regions used by X and dosemu and similar
 * apps.
 * Access has to be given to non-kernel-RAM areas as well; these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	if (pagenr < 256)
		return 1;
	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pagenr))
		return 1;
	return 0;
}

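/*
 * Free (or, with page-alloc debugging, just unmap) a page-aligned range
 * of init memory, making it writable and non-executable first.
 */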
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long begin_aligned, end_aligned;

	/* Make sure boundaries are page aligned */
	begin_aligned = PAGE_ALIGN(begin);
	end_aligned   = end & PAGE_MASK;

	if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
		begin = begin_aligned;
		end   = end_aligned;
	}

	if (begin >= end)
		return;

	/*
	 * If debugging page accesses then do not free this memory but
	 * mark them not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
	if (debug_pagealloc_enabled()) {
		pr_info("debug: unmapping init [mem %#010lx-%#010lx]\n",
			begin, end - 1);
		set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
	} else {
		/*
		 * We just marked the kernel text read only above, now that
		 * we are going to free part of that, we need to make that
		 * writeable and non-executable first.
		 */
		set_memory_nx(begin, (end - begin) >> PAGE_SHIFT);
		set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

		free_reserved_area((void *)begin, (void *)end,
				   POISON_FREE_INITMEM, what);
	}
}

void free_initmem(void)
{
	free_init_pages("unused kernel",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	/*
	 * end might not be page aligned, and we cannot align it here: the
	 * decompressor could be confused by an aligned initrd_end.
	 * We already reserved the trailing partial page earlier in
	 *   - i386_start_kernel()
	 *   - x86_64_start_kernel()
	 *   - relocate_initrd()
	 * so here we can safely PAGE_ALIGN() the end to free that partial page.
	 */
	free_init_pages("initrd", start, PAGE_ALIGN(end));
}
#endif

void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA]		= min(MAX_DMA_PFN, max_low_pfn);
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32]	= min(MAX_DMA32_PFN, max_low_pfn);
#endif
	max_zone_pfns[ZONE_NORMAL]	= max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM]	= max_pfn;
#endif

	free_area_init_nodes(max_zone_pfns);
}

DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
#ifdef CONFIG_SMP
	.active_mm = &init_mm,
	.state = 0,
#endif
	.cr4 = ~0UL,	/* fail hard if we screw up cr4 shadow initialization */
};
EXPORT_SYMBOL_GPL(cpu_tlbstate);

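/*
 * Keep __cachemode2pte_tbl[] and __pte2cachemode_tbl[] consistent when a
 * cache-mode entry is reprogrammed (see pat_init() and the table comment
 * at the top of this file).
 */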
void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
{
	/* entry 0 MUST be WB (hardwired to speed up translations) */
	BUG_ON(!entry && cache != _PAGE_CACHE_MODE_WB);

	__cachemode2pte_tbl[cache] = __cm_idx2pte(entry);
	__pte2cachemode_tbl[entry] = cache;
}