/*
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>

#include <asm/asm.h>
#include <asm/bios_ebda.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>
#include <asm/init.h>

unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static noinline int do_test_wp_bit(void);

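/*
 * Hand out one zeroed page from the region reserved for early page
 * tables (bounded by e820_table_end/e820_table_top).  Only usable
 * before the bootmem allocator is up.
 */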
static __init void *alloc_low_page(void)
{
	unsigned long pfn = e820_table_end++;
	void *adr;

	if (pfn >= e820_table_top)
		panic("alloc_low_page: ran out of memory");

	adr = __va(pfn * PAGE_SIZE);
	memset(adr, 0, PAGE_SIZE);
	return adr;
}

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
		if (after_bootmem)
			pmd_table = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
		else
			pmd_table = (pmd_t *)alloc_low_page();
		paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
		pud = pud_offset(pgd, 0);
		BUG_ON(pmd_table != pmd_offset(pud, 0));

		return pmd_table;
	}
#endif
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
		pte_t *page_table = NULL;

		if (after_bootmem) {
#ifdef CONFIG_DEBUG_PAGEALLOC
			page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
			if (!page_table)
				page_table =
				(pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
		} else
			page_table = (pte_t *)alloc_low_page();

		paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					   unsigned long vaddr, pte_t *lastpte)
{
#ifdef CONFIG_HIGHMEM
	/*
	 * Something (early fixmap) may already have put a pte
	 * page here, which causes the page table allocation
	 * to become nonlinear. Attempt to fix it, and if it
	 * is still nonlinear then we have to bug.
	 */
	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;

	if (pmd_idx_kmap_begin != pmd_idx_kmap_end
	    && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
	    && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end
	    && ((__pa(pte) >> PAGE_SHIFT) < e820_table_start
		|| (__pa(pte) >> PAGE_SHIFT) >= e820_table_end)) {
		pte_t *newpte;
		int i;

		BUG_ON(after_bootmem);
		newpte = alloc_low_page();
		for (i = 0; i < PTRS_PER_PTE; i++)
			set_pte(newpte + i, pte[i]);

		paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
		BUG_ON(newpte != pte_offset_kernel(pmd, 0));
		__flush_tlb_all();

		paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
		pte = newpte;
	}
	BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
	       && vaddr > fix_to_virt(FIX_KMAP_END)
	       && lastpte && lastpte + PTRS_PER_PTE != pte);
#endif
	return pte;
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The page tables are allocated contiguously in physical space
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx;
	unsigned long vaddr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte = NULL;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		pmd = pmd + pmd_index(vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
							pmd++, pmd_idx++) {
			pte = page_table_kmap_check(one_page_table_init(pmd),
			                            pmd, vaddr, pte);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}

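/*
 * Everything from PAGE_OFFSET up to __init_end is treated as kernel
 * text here and must therefore keep an executable mapping.
 */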
static inline int is_kernel_text(unsigned long addr)
{
	if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
		return 1;
	return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
unsigned long __init
kernel_physical_mapping_init(unsigned long start,
			     unsigned long end,
			     unsigned long page_size_mask)
{
	int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
	unsigned long start_pfn, end_pfn;
	pgd_t *pgd_base = swapper_pg_dir;
	int pgd_idx, pmd_idx, pte_ofs;
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned pages_2m, pages_4k;
	int mapping_iter;

	start_pfn = start >> PAGE_SHIFT;
	end_pfn = end >> PAGE_SHIFT;

	/*
	 * First iteration will setup identity mapping using large/small pages
	 * based on use_pse, with other attributes same as set by
	 * the early code in head_32.S
	 *
	 * Second iteration will setup the appropriate attributes (NX, GLOBAL..)
	 * as desired for the kernel identity mapping.
	 *
	 * This two pass mechanism conforms to the TLB app note which says:
	 *
	 *     "Software should not write to a paging-structure entry in a way
	 *      that would change, for any linear address, both the page size
	 *      and either the page frame or attributes."
	 */
	mapping_iter = 1;

	if (!cpu_has_pse)
		use_pse = 0;

repeat:
	pages_2m = pages_4k = 0;
	pfn = start_pfn;
	pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);

		if (pfn >= end_pfn)
			continue;
#ifdef CONFIG_X86_PAE
		pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
		pmd += pmd_idx;
#else
		pmd_idx = 0;
#endif
		for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
		     pmd++, pmd_idx++) {
			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

			/*
			 * Map with big pages if possible, otherwise
			 * create normal page tables:
			 */
			if (use_pse) {
				unsigned int addr2;
				pgprot_t prot = PAGE_KERNEL_LARGE;
				/*
				 * first pass will use the same initial
				 * identity mapping attribute + _PAGE_PSE.
				 */
				pgprot_t init_prot =
					__pgprot(PTE_IDENT_ATTR |
						 _PAGE_PSE);

				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
					PAGE_OFFSET + PAGE_SIZE-1;

				if (is_kernel_text(addr) ||
				    is_kernel_text(addr2))
					prot = PAGE_KERNEL_LARGE_EXEC;

				pages_2m++;
				if (mapping_iter == 1)
					set_pmd(pmd, pfn_pmd(pfn, init_prot));
				else
					set_pmd(pmd, pfn_pmd(pfn, prot));

				pfn += PTRS_PER_PTE;
				continue;
			}
			pte = one_page_table_init(pmd);

			pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
			pte += pte_ofs;
			for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
				pgprot_t prot = PAGE_KERNEL;
				/*
				 * first pass will use the same initial
				 * identity mapping attribute.
				 */
				pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);

				if (is_kernel_text(addr))
					prot = PAGE_KERNEL_EXEC;

				pages_4k++;
				if (mapping_iter == 1)
					set_pte(pte, pfn_pte(pfn, init_prot));
				else
					set_pte(pte, pfn_pte(pfn, prot));
			}
		}
	}
	if (mapping_iter == 1) {
		/*
		 * update direct mapping page count only in the first
		 * iteration.
		 */
		update_page_count(PG_LEVEL_2M, pages_2m);
		update_page_count(PG_LEVEL_4K, pages_4k);

		/*
		 * local global flush tlb, which will flush the previous
		 * mappings present in both small and large page TLB's.
		 */
		__flush_tlb_all();

		/*
		 * Second iteration will set the actual desired PTE attributes.
		 */
		mapping_iter = 2;
		goto repeat;
	}
	return 0;
}

pte_t *kmap_pte;
pgprot_t kmap_prot;

static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/*
	 * Cache the first kmap pte:
	 */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}

#ifdef CONFIG_HIGHMEM
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
I
L
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}

static void __init add_one_highpage_init(struct page *page, int pfn)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalhigh_pages++;
}

struct add_highpages_data {
	unsigned long start_pfn;
	unsigned long end_pfn;
};

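/*
 * Callback for work_with_active_regions(): clip the active region
 * against the requested pfn window and free every valid page in the
 * intersection into highmem.
 */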
static int __init add_highpages_work_fn(unsigned long start_pfn,
					 unsigned long end_pfn, void *datax)
{
	int node_pfn;
	struct page *page;
	unsigned long final_start_pfn, final_end_pfn;
	struct add_highpages_data *data;

	data = (struct add_highpages_data *)datax;

	final_start_pfn = max(start_pfn, data->start_pfn);
	final_end_pfn = min(end_pfn, data->end_pfn);
	if (final_start_pfn >= final_end_pfn)
		return 0;

	for (node_pfn = final_start_pfn; node_pfn < final_end_pfn;
	     node_pfn++) {
		if (!pfn_valid(node_pfn))
			continue;
		page = pfn_to_page(node_pfn);
		add_one_highpage_init(page, node_pfn);
	}

	return 0;

}

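/*
 * Free all highmem pages in [start_pfn, end_pfn) that fall inside
 * e820 active regions on the given node.
 */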
void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
					      unsigned long end_pfn)
{
	struct add_highpages_data data;

	data.start_pfn = start_pfn;
	data.end_pfn = end_pfn;

	work_with_active_regions(nid, add_highpages_work_fn, &data);
}

#else
static inline void permanent_kmaps_init(pgd_t *pgd_base)
{
}
#endif /* CONFIG_HIGHMEM */

void __init native_pagetable_setup_start(pgd_t *base)
{
	unsigned long pfn, va;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * Remove any mappings which extend past the end of physical
	 * memory from the boot time page table:
	 */
	for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
		va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
		pgd = base + pgd_index(va);
		if (!pgd_present(*pgd))
			break;

		pud = pud_offset(pgd, va);
		pmd = pmd_offset(pud, va);
		if (!pmd_present(*pmd))
			break;

		pte = pte_offset_kernel(pmd, va);
		if (!pte_present(*pte))
			break;

		pte_clear(NULL, va, pte);
	}
	paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
}

void __init native_pagetable_setup_done(pgd_t *base)
{
}

/*
 * Build a proper pagetable for the kernel mappings.  Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S.  The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir.  In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
void __init early_ioremap_page_table_range_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;
	unsigned long vaddr, end;

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, pgd_base);
	early_ioremap_reset();
}

static void __init pagetable_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;

	permanent_kmaps_init(pgd_base);
}

#ifdef CONFIG_ACPI_SLEEP
/*
 * ACPI suspend needs this for resume, because things like the intel-agp
 * driver might have split up a kernel 4MB mapping.
 */
char swsusp_pg_dir[PAGE_SIZE]
	__attribute__ ((aligned(PAGE_SIZE)));

static inline void save_pg_dir(void)
{
	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else /* !CONFIG_ACPI_SLEEP */
static inline void save_pg_dir(void)
{
}
#endif /* !CONFIG_ACPI_SLEEP */

void zap_low_mappings(void)
{
	int i;

	/*
	 * Zap initial low-memory mappings.
	 *
	 * Note that "pgd_clear()" doesn't do it for
	 * us, because pgd_clear() is a no-op on i386.
	 */
	for (i = 0; i < KERNEL_PGD_BOUNDARY; i++) {
#ifdef CONFIG_X86_PAE
		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
		set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
	}
	flush_tlb_all();
}

int nx_enabled;

pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
EXPORT_SYMBOL_GPL(__supported_pte_mask);

#ifdef CONFIG_X86_PAE

static int disable_nx __initdata;

/*
 * noexec = on|off
 *
 * Control non executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
	if (!str || !strcmp(str, "on")) {
		if (cpu_has_nx) {
			__supported_pte_mask |= _PAGE_NX;
			disable_nx = 0;
		}
	} else {
		if (!strcmp(str, "off")) {
			disable_nx = 1;
			__supported_pte_mask &= ~_PAGE_NX;
		} else {
			return -EINVAL;
		}
	}

	return 0;
}
early_param("noexec", noexec_setup);

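/*
 * Enable NX if the CPU advertises it (CPUID 0x80000001, EDX bit 20)
 * and "noexec=off" was not given: set EFER.NX and allow _PAGE_NX in
 * __supported_pte_mask.
 */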
static void __init set_nx(void)
{
	unsigned int v[4], l, h;

	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);

		if ((v[3] & (1 << 20)) && !disable_nx) {
			rdmsr(MSR_EFER, l, h);
			l |= EFER_NX;
			wrmsr(MSR_EFER, l, h);
			nx_enabled = 1;
			__supported_pte_mask |= _PAGE_NX;
		}
	}
}
#endif

/* user-defined highmem size */
static unsigned int highmem_pages = -1;

/*
 * highmem=size forces highmem to be exactly 'size' bytes.
 * This works even on boxes that have no highmem otherwise.
 * This also works to reduce highmem size on bigger boxes.
 */
static int __init parse_highmem(char *arg)
{
	if (!arg)
		return -EINVAL;

	highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
	return 0;
}
early_param("highmem", parse_highmem);

#define MSG_HIGHMEM_TOO_BIG \
	"highmem size (%luMB) is bigger than pages available (%luMB)!\n"

#define MSG_LOWMEM_TOO_SMALL \
	"highmem size (%luMB) results in <64MB lowmem, ignoring it!\n"
/*
 * All of RAM fits into lowmem - but if user wants highmem
 * artificially via the highmem=x boot parameter then create
 * it:
 */
void __init lowmem_pfn_init(void)
{
	/* max_low_pfn is 0, we already have early_res support */
	max_low_pfn = max_pfn;

	if (highmem_pages == -1)
		highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
	if (highmem_pages >= max_pfn) {
		printk(KERN_ERR MSG_HIGHMEM_TOO_BIG,
			pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
		highmem_pages = 0;
	}
	if (highmem_pages) {
		if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) {
			printk(KERN_ERR MSG_LOWMEM_TOO_SMALL,
				pages_to_mb(highmem_pages));
			highmem_pages = 0;
		}
		max_low_pfn -= highmem_pages;
	}
#else
	if (highmem_pages)
		printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
#endif
}

#define MSG_HIGHMEM_TOO_SMALL \
	"only %luMB highmem pages available, ignoring highmem size of %luMB!\n"

#define MSG_HIGHMEM_TRIMMED \
	"Warning: only 4GB will be used. Use a HIGHMEM64G enabled kernel!\n"
/*
 * We have more RAM than fits into lowmem - we try to put it into
 * highmem, also taking the highmem=x boot parameter into account:
 */
void __init highmem_pfn_init(void)
{
	max_low_pfn = MAXMEM_PFN;

	if (highmem_pages == -1)
		highmem_pages = max_pfn - MAXMEM_PFN;

	if (highmem_pages + MAXMEM_PFN < max_pfn)
		max_pfn = MAXMEM_PFN + highmem_pages;

	if (highmem_pages + MAXMEM_PFN > max_pfn) {
		printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL,
			pages_to_mb(max_pfn - MAXMEM_PFN),
			pages_to_mb(highmem_pages));
		highmem_pages = 0;
	}
#ifndef CONFIG_HIGHMEM
	/* Maximum memory usable is what is directly addressable */
	printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20);
	if (max_pfn > MAX_NONPAE_PFN)
		printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
	else
		printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
	max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_HIGHMEM64G
	if (max_pfn > MAX_NONPAE_PFN) {
		max_pfn = MAX_NONPAE_PFN;
		printk(KERN_WARNING MSG_HIGHMEM_TRIMMED);
	}
#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
}

/*
 * Determine low and high memory ranges:
 */
void __init find_low_pfn_range(void)
{
	/* it could update max_pfn */

	if (max_pfn <= MAXMEM_PFN)
		lowmem_pfn_init();
	else
		highmem_pfn_init();
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
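/*
 * Flat (non-NUMA) setup: register the usable memory ranges, record
 * the lowmem/highmem boundaries and bring up the bootmem allocator.
 */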
void __init initmem_init(unsigned long start_pfn,
				  unsigned long end_pfn)
{
#ifdef CONFIG_HIGHMEM
	highstart_pfn = highend_pfn = max_pfn;
	if (max_pfn > max_low_pfn)
		highstart_pfn = max_low_pfn;
	memory_present(0, 0, highend_pfn);
	e820_register_active_regions(0, 0, highend_pfn);
	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
		pages_to_mb(highend_pfn - highstart_pfn));
	num_physpages = highend_pfn;
	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
	memory_present(0, 0, max_low_pfn);
	e820_register_active_regions(0, 0, max_low_pfn);
	num_physpages = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
#ifdef CONFIG_FLATMEM
	max_mapnr = num_physpages;
#endif
	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
			pages_to_mb(max_low_pfn));

	setup_bootmem_allocator();
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

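/*
 * Describe the zone layout to the core VM: ZONE_DMA below
 * MAX_DMA_ADDRESS, ZONE_NORMAL up to max_low_pfn and, when
 * configured, ZONE_HIGHMEM up to highend_pfn.
 */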
static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] =
		virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
#endif

	free_area_init_nodes(max_zone_pfns);
}

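/*
 * Initialize the bootmem bitmap for one node, clipped to lowmem, and
 * return the address just past the bitmap so the caller can place the
 * next node's bitmap behind it.
 */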
static unsigned long __init setup_node_bootmem(int nodeid,
				 unsigned long start_pfn,
				 unsigned long end_pfn,
				 unsigned long bootmap)
{
	unsigned long bootmap_size;

	if (start_pfn > max_low_pfn)
		return bootmap;
	if (end_pfn > max_low_pfn)
		end_pfn = max_low_pfn;

	/* don't touch min_low_pfn */
	bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
					 bootmap >> PAGE_SHIFT,
					 start_pfn, end_pfn);
	printk(KERN_INFO "  node %d low ram: %08lx - %08lx\n",
		nodeid, start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
	printk(KERN_INFO "  node %d bootmap %08lx - %08lx\n",
		 nodeid, bootmap, bootmap + bootmap_size);
	free_bootmem_with_active_regions(nodeid, end_pfn);
	early_res_to_bootmem(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);

	return bootmap + bootmap_size;
}

void __init setup_bootmem_allocator(void)
{
	int nodeid;
	unsigned long bootmap_size, bootmap;
	/*
	 * Initialize the boot-time allocator (with low memory only):
	 */
	bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
	bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
				 PAGE_SIZE);
	if (bootmap == -1L)
		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
	reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP");

	printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
		 max_pfn_mapped<<PAGE_SHIFT);
	printk(KERN_INFO "  low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(nodeid)
		bootmap = setup_node_bootmem(nodeid, node_start_pfn[nodeid],
					node_end_pfn[nodeid], bootmap);
#else
	bootmap = setup_node_bootmem(0, 0, max_low_pfn, bootmap);
#endif

	after_bootmem = 1;
}

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
	pagetable_init();

	__flush_tlb_all();

	kmap_init();

	/*
	 * NOTE: at this point the bootmem allocator is fully available.
	 */
	sparse_init();
	zone_sizes_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's. All 586+'s are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
	printk(KERN_INFO
  "Checking if this processor honours the WP bit even in supervisor mode...");

	/* Any page-aligned address will do, the test is non-destructive */
	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
	boot_cpu_data.wp_works_ok = do_test_wp_bit();
	clear_fixmap(FIX_WP_TEST);

	if (!boot_cpu_data.wp_works_ok) {
		printk(KERN_CONT "No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
		panic(
  "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
	} else {
		printk(KERN_CONT "Ok.\n");
	}
}

static struct kcore_list kcore_mem, kcore_vmalloc;

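/*
 * Final memory init: release bootmem to the buddy allocator, account
 * reserved pages, report the virtual memory layout, sanity-check it
 * and run the WP-bit test before zapping the low mappings.
 */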
void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;
	int tmp;

	pci_iommu_alloc();

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif
	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages:
		 */
		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
			reservedpages++;

	set_highmem_pages_init();

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
			"%dk reserved, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
	       );

	printk(KERN_INFO "virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)__va(0), (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE	> FIXADDR_START);
	BUILD_BUG_ON(VMALLOC_END			> PKMAP_BASE);
#endif
#define high_memory (-128UL << 20)
	BUILD_BUG_ON(VMALLOC_START			>= VMALLOC_END);
#undef high_memory
#undef __FIXADDR_TOP

#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE	> FIXADDR_START);
	BUG_ON(VMALLOC_END				> PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_START				>= VMALLOC_END);
	BUG_ON((unsigned long)high_memory		> VMALLOC_START);

	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();

	save_pg_dir();
	zap_low_mappings();
}

#ifdef CONFIG_MEMORY_HOTPLUG
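/*
 * Memory hotplug entry point: on 32-bit, hot-added memory is always
 * onlined into ZONE_HIGHMEM.
 */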
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata = NODE_DATA(nid);
	struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(nid, zone, start_pfn, nr_pages);
}
#endif

/*
 * This function cannot be __init, since exceptions don't work in that
 * section.  Put this after the callers, so that it cannot be inlined.
 */
static noinline int do_test_wp_bit(void)
{
	char tmp_reg;
	int flag;

	__asm__ __volatile__(
		"	movb %0, %1	\n"
		"1:	movb %1, %0	\n"
		"	xorl %2, %2	\n"
		"2:			\n"
		_ASM_EXTABLE(1b,2b)
		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
		 "=q" (tmp_reg),
		 "=r" (flag)
		:"2" (1)
		:"memory");

	return flag;
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

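/*
 * Write-protect the kernel text (unless dynamic ftrace needs to patch
 * it) and the read-only data, exercising CPA both ways in debug
 * builds.
 */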
void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

#ifndef CONFIG_DYNAMIC_FTRACE
	/* Dynamic tracing modifies the kernel text section */
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel text: %luk\n",
		size >> 10);

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
		start, start+size);
	set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif
#endif /* CONFIG_DYNAMIC_FTRACE */

	start += size;
	size = (unsigned long)__end_rodata - start;
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		size >> 10);
	rodata_test();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
}
#endif

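/*
 * 32-bit has a single bootmem context, so the generic reservation
 * helper can simply forward to reserve_bootmem().
 */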
int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
				   int flags)
{
	return reserve_bootmem(phys, len, flags);
}