init_32.c 20.5 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24
/*
 *  linux/arch/i386/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
25
#include <linux/pfn.h>
26
#include <linux/poison.h>
L
Linus Torvalds 已提交
27 28 29
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
30
#include <linux/memory_hotplug.h>
31
#include <linux/initrd.h>
32
#include <linux/cpumask.h>
L
Linus Torvalds 已提交
33 34 35 36 37 38 39 40 41

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
I
Ingo Molnar 已提交
42
#include <asm/bugs.h>
L
Linus Torvalds 已提交
43 44
#include <asm/tlb.h>
#include <asm/tlbflush.h>
45
#include <asm/pgalloc.h>
L
Linus Torvalds 已提交
46
#include <asm/sections.h>
47
#include <asm/paravirt.h>
L
Linus Torvalds 已提交
48 49 50 51 52 53

unsigned int __VMALLOC_RESERVE = 128 << 20;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

I
Ingo Molnar 已提交
54
static noinline int do_test_wp_bit(void);
L
Linus Torvalds 已提交
55 56 57 58 59 60 61 62 63 64

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
		/*
		 * No pmd page behind this pgd entry yet: allocate one
		 * from lowmem bootmem (must be reachable before paging
		 * is fully up) and tell paravirt about it before
		 * hooking it into the pgd.
		 */
		pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);

		paravirt_alloc_pd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
		pud = pud_offset(pgd, 0);
		/* Sanity: the freshly installed pmd must be what we get back. */
		BUG_ON(pmd_table != pmd_offset(pud, 0));
	}
#endif
	/* Non-PAE: pud/pmd are folded, so this just returns the pgd entry. */
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
		pte_t *page_table = NULL;

#ifdef CONFIG_DEBUG_PAGEALLOC
		/*
		 * With page-alloc debugging, prefer a normal (possibly
		 * highmem-backed) bootmem page; fall back below if that
		 * fails.
		 */
		page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
		if (!page_table) {
			page_table =
				(pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
		}

		/* Notify paravirt before the page becomes a live pagetable. */
		paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguous on the physical space
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx;
	unsigned long vaddr;
	pgd_t *pgd;
	pmd_t *pmd;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	/*
	 * Walk pgd entries until either the table or the requested
	 * range is exhausted; vaddr advances in PMD_SIZE steps, so
	 * the (vaddr != end) test assumes end is PMD-aligned.
	 */
	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		pmd = pmd + pmd_index(vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
							pmd++, pmd_idx++) {
			one_page_table_init(pmd);

			vaddr += PMD_SIZE;
		}
		/* Only the first pgd starts mid-table; restart at 0 after. */
		pmd_idx = 0;
	}
}

/*
 * Does @addr fall inside the kernel image, from the start of lowmem
 * mapping (PAGE_OFFSET) up to and including the end of init sections?
 */
static inline int is_kernel_text(unsigned long addr)
{
	return (addr >= PAGE_OFFSET &&
		addr <= (unsigned long)__init_end) ? 1 : 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx, pte_ofs;
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd_idx++, pgd++) {
		pmd = one_md_table_init(pgd);
		/*
		 * Keep instantiating empty pmd tables past the end of
		 * lowmem, but map nothing there.
		 */
		if (pfn >= max_low_pfn)
			continue;

		for (pmd_idx = 0;
		     pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn;
		     pmd++, pmd_idx++) {
			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

			/*
			 * Map with big pages if possible, otherwise
			 * create normal page tables:
			 */
			if (cpu_has_pse) {
				unsigned int addr2;
				pgprot_t prot = PAGE_KERNEL_LARGE;

				/* Last byte covered by this large page. */
				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
					PAGE_OFFSET + PAGE_SIZE-1;

				/*
				 * A large page overlapping kernel text
				 * must be executable.
				 */
				if (is_kernel_text(addr) ||
				    is_kernel_text(addr2))
					prot = PAGE_KERNEL_LARGE_EXEC;

				set_pmd(pmd, pfn_pmd(pfn, prot));

				pfn += PTRS_PER_PTE;
				continue;
			}
			pte = one_page_table_init(pmd);

			for (pte_ofs = 0;
			     pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
				pgprot_t prot = PAGE_KERNEL;

				if (is_kernel_text(addr))
					prot = PAGE_KERNEL_EXEC;

				set_pte(pte, pfn_pte(pfn, prot));
			}
		}
	}
}

/*
 * Pentium Pro RAM erratum: pfns 0x70000-0x7003F (physical addresses
 * 0x70000000-0x7003ffff) must not be handed out on affected CPUs.
 */
static inline int page_kills_ppro(unsigned long pagenr)
{
	return (pagenr >= 0x70000 && pagenr <= 0x7003F) ? 1 : 0;
}

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

I
Ingo Molnar 已提交
223 224 225 226 227
/* Walk the (folded) pgd/pud/pmd levels to the pte mapping @vaddr. */
static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
L
Linus Torvalds 已提交
228 229 230 231 232

/* Initialize the kmap (temporary highmem mapping) machinery. */
static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/*
	 * Cache the first kmap pte:
	 */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}

/*
 * Set up the page tables backing the permanent kmap (PKMAP) area and
 * cache a pointer to its pte table in pkmap_page_table.
 */
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	unsigned long vaddr;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	vaddr = PKMAP_BASE;
	/* Ensure page tables exist for the whole PKMAP window. */
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}

260
/* Hand a highmem page to the page allocator and account for it. */
static void __meminit free_new_highpage(struct page *page)
{
	init_page_count(page);
	__free_page(page);
	totalhigh_pages++;
}

/*
 * Release one highmem page to the allocator at boot, unless it is not
 * RAM or it hits the Pentium Pro RAM erratum (@bad_ppro), in which
 * case it stays reserved.
 */
void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
		ClearPageReserved(page);
		free_new_highpage(page);
	} else
		SetPageReserved(page);
}

I
Ingo Molnar 已提交
276 277
/*
 * Online one hot-added highmem page: free it to the allocator and
 * update the global page accounting. Always returns 0.
 */
static int __meminit
add_one_highpage_hotplug(struct page *page, unsigned long pfn)
{
	free_new_highpage(page);
	totalram_pages++;
#ifdef CONFIG_FLATMEM
	/* Flat mem_map may need to grow to cover the new pfn. */
	max_mapnr = max(pfn, max_mapnr);
#endif
	num_physpages++;

	return 0;
}

/*
 * Not currently handling the NUMA case.
 * Assuming single node and all memory that
 * has been added dynamically that would be
 * onlined here is in HIGHMEM.
 */
void __meminit online_page(struct page *page)
{
	ClearPageReserved(page);
	add_one_highpage_hotplug(page, page_to_pfn(page));
}

I
Ingo Molnar 已提交
301
#ifndef CONFIG_NUMA
/* Free all boot-time highmem pages ([highstart_pfn, highend_pfn)). */
static void __init set_highmem_pages_init(int bad_ppro)
{
	int pfn;

	for (pfn = highstart_pfn; pfn < highend_pfn; pfn++) {
		/*
		 * Holes under sparsemem might have no mem_map[]:
		 */
		if (pfn_valid(pfn))
			add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
	}
	totalram_pages += totalhigh_pages;
}
#endif /* !CONFIG_NUMA */
L
Linus Torvalds 已提交
316 317

#else
I
Ingo Molnar 已提交
318 319 320
# define kmap_init()				do { } while (0)
# define permanent_kmaps_init(pgd_base)		do { } while (0)
# define set_highmem_pages_init(bad_ppro)	do { } while (0)
L
Linus Torvalds 已提交
321 322
#endif /* CONFIG_HIGHMEM */

323
pteval_t __PAGE_KERNEL = _PAGE_KERNEL;
324
EXPORT_SYMBOL(__PAGE_KERNEL);
L
Linus Torvalds 已提交
325

I
Ingo Molnar 已提交
326
pteval_t __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
L
Linus Torvalds 已提交
327

328
/*
 * Prepare @base (normally swapper_pg_dir) for pagetable_init() on
 * native hardware; paravirt guests hook this differently.
 */
void __init native_pagetable_setup_start(pgd_t *base)
{
#ifdef CONFIG_X86_PAE
	int i;

	/*
	 * Init entries of the first-level page table to the
	 * zero page, if they haven't already been set up.
	 *
	 * In a normal native boot, we'll be running on a
	 * pagetable rooted in swapper_pg_dir, but not in PAE
	 * mode, so this will end up clobbering the mappings
	 * for the lower 24Mbytes of the address space,
	 * without affecting the kernel address space.
	 */
	for (i = 0; i < USER_PTRS_PER_PGD; i++)
		set_pgd(&base[i],
			__pgd(__pa(empty_zero_page) | _PAGE_PRESENT));

	/* Make sure kernel address space is empty so that a pagetable
	   will be allocated for it. */
	memset(&base[USER_PTRS_PER_PGD], 0,
	       KERNEL_PGD_PTRS * sizeof(pgd_t));
#else
	paravirt_alloc_pd(&init_mm, __pa(base) >> PAGE_SHIFT);
#endif
}

/* Finish native pagetable setup after pagetable_init() has run. */
void __init native_pagetable_setup_done(pgd_t *base)
{
#ifdef CONFIG_X86_PAE
	/*
	 * Add low memory identity-mappings - SMP needs it when
	 * starting up on an AP from real-mode. In the non-PAE
	 * case we already have these mappings through head.S.
	 * All user-space mappings are explicitly cleared after
	 * SMP startup.
	 */
	set_pgd(&base[0], base[USER_PTRS_PER_PGD]);
#endif
}

/*
 * Build a proper pagetable for the kernel mappings.  Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/i386/kernel/head.S, and not running in PAE mode
 * (even if we'll end up running in PAE).  The root of the pagetable
 * will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir.  In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
static void __init pagetable_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;
	unsigned long vaddr, end;

	paravirt_pagetable_setup_start(pgd_base);

	/* Enable PSE if available */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__PAGE_KERNEL |= _PAGE_GLOBAL;
		__PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
	}

	kernel_physical_mapping_init(pgd_base);
	remap_numa_kva();

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	early_ioremap_clear();
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, pgd_base);
	early_ioremap_reset();

	permanent_kmaps_init(pgd_base);

	paravirt_pagetable_setup_done(pgd_base);
}

426
#if defined(CONFIG_HIBERNATION) || defined(CONFIG_ACPI)
L
Linus Torvalds 已提交
427 428 429 430 431
/*
 * Swap suspend & friends need this for resume because things like the intel-agp
 * driver might have split up a kernel 4MB mapping.
 */
char __nosavedata swsusp_pg_dir[PAGE_SIZE]
I
Ingo Molnar 已提交
432
	__attribute__ ((aligned(PAGE_SIZE)));
L
Linus Torvalds 已提交
433 434 435 436 437 438 439 440 441 442 443

static inline void save_pg_dir(void)
{
	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else
static inline void save_pg_dir(void)
{
}
#endif

I
Ingo Molnar 已提交
444
/*
 * Remove the low identity mappings that were only needed for early
 * boot / SMP trampoline entry, after saving a copy for suspend/resume.
 */
void zap_low_mappings(void)
{
	int i;

	save_pg_dir();

	/*
	 * Zap initial low-memory mappings.
	 *
	 * Note that "pgd_clear()" doesn't do it for
	 * us, because pgd_clear() is a no-op on i386.
	 */
	for (i = 0; i < USER_PTRS_PER_PGD; i++) {
#ifdef CONFIG_X86_PAE
		/* PAE: point at the zero page instead of clearing. */
		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
		set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
	}
	flush_tlb_all();
}

I
Ingo Molnar 已提交
466
int nx_enabled;
J
Jan Beulich 已提交
467

468 469 470
pteval_t __supported_pte_mask __read_mostly = ~_PAGE_NX;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

J
Jan Beulich 已提交
471 472
#ifdef CONFIG_X86_PAE

/* Set by "noexec=off"; consulted later by set_nx(). */
static int disable_nx __initdata;

/*
 * noexec = on|off
 *
 * Control non executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
	/* No argument behaves like "on". */
	if (!str || !strcmp(str, "on")) {
		if (cpu_has_nx) {
			__supported_pte_mask |= _PAGE_NX;
			disable_nx = 0;
		}
	} else {
		if (!strcmp(str, "off")) {
			disable_nx = 1;
			__supported_pte_mask &= ~_PAGE_NX;
		} else {
			return -EINVAL;
		}
	}

	return 0;
}
early_param("noexec", noexec_setup);
L
Linus Torvalds 已提交
502 503 504 505 506 507 508

/*
 * Enable the NX (Execute Disable) feature in EFER if the CPU
 * advertises it (CPUID 0x80000001 EDX bit 20) and "noexec=off"
 * was not given.
 */
static void __init set_nx(void)
{
	unsigned int v[4], l, h;

	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);

		if ((v[3] & (1 << 20)) && !disable_nx) {
			rdmsr(MSR_EFER, l, h);
			l |= EFER_NX;
			wrmsr(MSR_EFER, l, h);
			nx_enabled = 1;
			__supported_pte_mask |= _PAGE_NX;
		}
	}
}
#endif

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routines also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
#ifdef CONFIG_X86_PAE
	set_nx();
	if (nx_enabled)
		printk("NX (Execute Disable) protection: active\n");
#endif
	pagetable_init();

	/* Switch to the freshly built swapper_pg_dir. */
	load_cr3(swapper_pg_dir);

#ifdef CONFIG_X86_PAE
	/*
	 * We will bail out later - printk doesn't work right now so
	 * the user would just see a hanging kernel.
	 */
	if (cpu_has_pae)
		set_in_cr4(X86_CR4_PAE);
#endif
	__flush_tlb_all();

	kmap_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
 * used to involve black magic jumps to work around some nasty CPU bugs,
 * but fortunately the switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
	printk("Checking if this processor honours the WP bit even in supervisor mode... ");

	/* Any page-aligned address will do, the test is non-destructive */
	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
	boot_cpu_data.wp_works_ok = do_test_wp_bit();
	clear_fixmap(FIX_WP_TEST);

	if (!boot_cpu_data.wp_works_ok) {
		printk("No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
		panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
	} else {
		printk("Ok.\n");
	}
}

I
Ingo Molnar 已提交
577
/* /proc/kcore entries for lowmem and the vmalloc area. */
static struct kcore_list kcore_mem, kcore_vmalloc;

/*
 * Final memory setup: release bootmem to the buddy allocator, account
 * reserved pages, bring up highmem, print the memory/layout summary
 * and sanity-check the virtual address space layout.
 */
void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;
	int tmp, bad_ppro;

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif
	bad_ppro = ppro_with_ram_bug();

#ifdef CONFIG_HIGHMEM
	/* check that fixmap and pkmap do not overlap */
	if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
		printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
		printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
				PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
		BUG();
	}
#endif
	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages:
		 */
		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
			reservedpages++;

	set_highmem_pages_init(bad_ppro);

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
			"%dk reserved, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
	       );

#if 1 /* double-sanity-check paranoia */
	printk("virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)__va(0), (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE	> FIXADDR_START);
	BUG_ON(VMALLOC_END				> PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_START				> VMALLOC_END);
	BUG_ON((unsigned long)high_memory		> VMALLOC_START);
#endif /* double-sanity-check paranoia */

#ifdef CONFIG_X86_PAE
	if (!cpu_has_pae)
		panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
#endif
	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();

	/*
	 * Subtle. SMP is doing its boot stuff late (because it has to
	 * fork idle threads) - but it also needs low mappings for the
	 * protected-mode entry to work. We zap these entries only after
	 * the WP-bit has been tested.
	 */
#ifndef CONFIG_SMP
	zap_low_mappings();
#endif
}

691
#ifdef CONFIG_MEMORY_HOTPLUG
692
int arch_add_memory(int nid, u64 start, u64 size)
693
{
694
	struct pglist_data *pgdata = NODE_DATA(nid);
695
	struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
696 697 698 699 700
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(zone, start_pfn, nr_pages);
}
701
#endif
702

703
/* Slab cache for pmd pages; only used when PAE provides a real pmd level. */
struct kmem_cache *pmd_cache;

void __init pgtable_cache_init(void)
{
	/* PTRS_PER_PMD > 1 only in PAE mode; non-PAE folds the pmd level. */
	if (PTRS_PER_PMD > 1) {
		pmd_cache = kmem_cache_create("pmd",
					      PTRS_PER_PMD*sizeof(pmd_t),
					      PTRS_PER_PMD*sizeof(pmd_t),
					      SLAB_PANIC,
					      pmd_ctor);
	}
}

/*
 * This function cannot be __init, since exceptions don't work in that
 * section.  Put this after the callers, so that it cannot be inlined.
 */
static noinline int do_test_wp_bit(void)
{
	char tmp_reg;
	int flag;

	/*
	 * Write through the read-only FIX_WP_TEST fixmap. If WP is
	 * honoured the write faults, the exception table entry skips
	 * the xorl, and flag stays 1; otherwise the xorl clears it.
	 */
	__asm__ __volatile__(
		"	movb %0, %1	\n"
		"1:	movb %1, %0	\n"
		"	xorl %2, %2	\n"
		"2:			\n"
		".section __ex_table, \"a\"\n"
		"	.align 4	\n"
		"	.long 1b, 2b	\n"
		".previous		\n"
		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
		 "=q" (tmp_reg),
		 "=r" (flag)
		:"2" (1)
		:"memory");

	return flag;
}

743
#ifdef CONFIG_DEBUG_RODATA
744 745
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);
746 747 748

void mark_rodata_ro(void)
{
749 750
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;
751

752 753 754 755 756 757
#ifndef CONFIG_KPROBES
#ifdef CONFIG_HOTPLUG_CPU
	/* It must still be possible to apply SMP alternatives. */
	if (num_possible_cpus() <= 1)
#endif
	{
758
		set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
759
		printk("Write protecting the kernel text: %luk\n", size >> 10);
760 761 762

#ifdef CONFIG_CPA_DEBUG
		printk("Testing CPA: Reverting %lx-%lx\n", start, start+size);
763
		set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);
764 765

		printk("Testing CPA: write protecting again\n");
766
		set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
767
#endif
768 769
	}
#endif
770 771
	start += size;
	size = (unsigned long)__end_rodata - start;
772
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
773 774
	printk("Write protecting the kernel read-only data: %luk\n",
	       size >> 10);
775
	rodata_test();
776

777 778
#ifdef CONFIG_CPA_DEBUG
	printk("Testing CPA: undo %lx-%lx\n", start, start + size);
779
	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
780 781

	printk("Testing CPA: write protecting again\n");
782
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
783
#endif
784 785 786
}
#endif

G
Gerd Hoffmann 已提交
787 788
/*
 * Free the init-section pages in [begin, end) back to the allocator,
 * poisoning them first; @what names the region for the log message.
 */
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
	/*
	 * If debugging page accesses then do not free this memory but
	 * mark them not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
		begin, PAGE_ALIGN(end));
	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
	unsigned long addr;

	/*
	 * We just marked the kernel text read only above, now that
	 * we are going to free part of that, we need to make that
	 * writeable first.
	 */
	set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		/* Poison so stale init-data use is detectable. */
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
#endif
}

void free_initmem(void)
{
	free_init_pages("unused kernel memory",
822 823
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
G
Gerd Hoffmann 已提交
824
}
825

L
Linus Torvalds 已提交
826 827 828
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
829
	free_init_pages("initrd memory", start, end);
L
Linus Torvalds 已提交
830 831
}
#endif