init_32.c 19.7 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
/*
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
24
#include <linux/pfn.h>
25
#include <linux/poison.h>
L
Linus Torvalds 已提交
26 27 28
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
29
#include <linux/memory_hotplug.h>
30
#include <linux/initrd.h>
31
#include <linux/cpumask.h>
L
Linus Torvalds 已提交
32

33
#include <asm/asm.h>
L
Linus Torvalds 已提交
34 35 36 37 38 39 40 41
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
I
Ingo Molnar 已提交
42
#include <asm/bugs.h>
L
Linus Torvalds 已提交
43 44
#include <asm/tlb.h>
#include <asm/tlbflush.h>
45
#include <asm/pgalloc.h>
L
Linus Torvalds 已提交
46
#include <asm/sections.h>
47
#include <asm/paravirt.h>
48
#include <asm/setup.h>
49
#include <asm/cacheflush.h>
L
Linus Torvalds 已提交
50 51 52

/* Bytes reserved at the top of lowmem for vmalloc/fixmap (default 128MB). */
unsigned int __VMALLOC_RESERVE = 128 << 20;

/* Highest page frame number direct-mapped so far (updated while mapping). */
unsigned long max_pfn_mapped;

/* Per-CPU mmu_gather instance used by the TLB teardown code. */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
/* PFN bounds of the highmem region — set elsewhere during early setup. */
unsigned long highstart_pfn, highend_pfn;

/* Forward declaration; defined after its callers so it cannot be inlined. */
static noinline int do_test_wp_bit(void);
L
Linus Torvalds 已提交
59 60 61 62 63 64 65 66 67 68

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
		/* Empty pgd slot under PAE: allocate a fresh pmd page. */
		pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);

		paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
		pud = pud_offset(pgd, 0);
		/* With the pud folded, the walk must land on the new page: */
		BUG_ON(pmd_table != pmd_offset(pud, 0));
	}
#endif
	/* Non-PAE (or already-present entry): walk down to the pmd. */
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
		pte_t *page_table = NULL;

#ifdef CONFIG_DEBUG_PAGEALLOC
		/* With page-alloc debugging, try the non-low pool first. */
		page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
		if (!page_table) {
			page_table =
				(pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
		}

		paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		/* Sanity-check that the entry we installed is what we read back: */
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguous on the physical space
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx;
	unsigned long vaddr;
	pgd_t *pgd;
	pmd_t *pmd;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		pmd = pmd + pmd_index(vaddr);
		/* pmd_idx is only non-zero for the first (partial) pgd entry. */
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
							pmd++, pmd_idx++) {
			one_page_table_init(pmd);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}

/*
 * Report whether @addr lies inside the mapped kernel image, i.e.
 * anywhere from PAGE_OFFSET up to the end of the init sections.
 */
static inline int is_kernel_text(unsigned long addr)
{
	return (addr >= PAGE_OFFSET) && (addr <= (unsigned long)__init_end);
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx, pte_ofs;
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		if (pfn >= max_low_pfn)
			continue;

		for (pmd_idx = 0;
		     pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn;
		     pmd++, pmd_idx++) {
			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

			/*
			 * Map with big pages if possible, otherwise
			 * create normal page tables:
			 *
			 * Don't use a large page for the first 2/4MB of memory
			 * because there are often fixed size MTRRs in there
			 * and overlapping MTRRs into large pages can cause
			 * slowdowns.
			 */
			if (cpu_has_pse && !(pgd_idx == 0 && pmd_idx == 0)) {
				unsigned int addr2;
				pgprot_t prot = PAGE_KERNEL_LARGE;

				/* Last byte covered by this large page: */
				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
					PAGE_OFFSET + PAGE_SIZE-1;

				/* Keep kernel text executable: */
				if (is_kernel_text(addr) ||
				    is_kernel_text(addr2))
					prot = PAGE_KERNEL_LARGE_EXEC;

				set_pmd(pmd, pfn_pmd(pfn, prot));

				pfn += PTRS_PER_PTE;
				max_pfn_mapped = pfn;
				continue;
			}
			pte = one_page_table_init(pmd);

			for (pte_ofs = 0;
			     pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
				pgprot_t prot = PAGE_KERNEL;

				if (is_kernel_text(addr))
					prot = PAGE_KERNEL_EXEC;

				set_pte(pte, pfn_pte(pfn, prot));
			}
			max_pfn_mapped = pfn;
		}
	}
}

/*
 * Pentium Pro erratum: physical pages 0x70000000-0x7003ffff (pfns
 * 0x70000-0x7003f) are unusable on affected chips.  Returns 1 when
 * @pagenr falls inside that window, 0 otherwise.
 */
static inline int page_kills_ppro(unsigned long pagenr)
{
	const unsigned long first_bad_pfn = 0x70000;
	const unsigned long last_bad_pfn  = 0x7003F;

	return (pagenr >= first_bad_pfn && pagenr <= last_bad_pfn) ? 1 : 0;
}

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

I
Ingo Molnar 已提交
234 235 236 237 238
/* Walk the kernel page tables and return the pte that maps @vaddr. */
static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
L
Linus Torvalds 已提交
239 240 241 242 243

/* Cache the pte of the first kmap slot and the default kmap protection. */
static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/*
	 * Cache the first kmap pte:
	 */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}

/*
 * Allocate page tables for the permanent-kmap (pkmap) window and cache
 * a pointer to its pte page in pkmap_page_table.
 */
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	unsigned long vaddr;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	vaddr = PKMAP_BASE;
	/* Make sure page tables exist for the whole pkmap range: */
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}

271
/* Hand one highmem page to the page allocator and account for it. */
static void __meminit free_new_highpage(struct page *page)
{
	init_page_count(page);
	__free_page(page);
	totalhigh_pages++;
}

/*
 * Free one highmem page at boot — unless it is not RAM, or it is one
 * of the pages the Pentium Pro erratum makes unusable; those stay
 * reserved.
 */
void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
		ClearPageReserved(page);
		free_new_highpage(page);
	} else
		SetPageReserved(page);
}

I
Ingo Molnar 已提交
287 288
/* Memory-hotplug variant: free a new highmem page and update counters. */
static int __meminit
add_one_highpage_hotplug(struct page *page, unsigned long pfn)
{
	free_new_highpage(page);
	totalram_pages++;
#ifdef CONFIG_FLATMEM
	/* Flat mem_map: grow max_mapnr if this pfn extends it. */
	max_mapnr = max(pfn, max_mapnr);
#endif
	num_physpages++;

	return 0;
}

/*
 * Not currently handling the NUMA case.
 * Assuming single node and all memory that
 * has been added dynamically that would be
 * onlined here is in HIGHMEM.
 */
void __meminit online_page(struct page *page)
{
	ClearPageReserved(page);
	add_one_highpage_hotplug(page, page_to_pfn(page));
}

I
Ingo Molnar 已提交
312
#ifndef CONFIG_NUMA
L
Linus Torvalds 已提交
313 314 315
/* Free every valid highmem page into the allocator (non-NUMA case). */
static void __init set_highmem_pages_init(int bad_ppro)
{
	int pfn;

	for (pfn = highstart_pfn; pfn < highend_pfn; pfn++) {
		/*
		 * Holes under sparsemem might not have a mem_map[]:
		 */
		if (pfn_valid(pfn))
			add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
	}
	totalram_pages += totalhigh_pages;
}
I
Ingo Molnar 已提交
326
#endif /* !CONFIG_NUMA */
L
Linus Torvalds 已提交
327 328

#else
I
Ingo Molnar 已提交
329 330 331
# define kmap_init()				do { } while (0)
# define permanent_kmaps_init(pgd_base)		do { } while (0)
# define set_highmem_pages_init(bad_ppro)	do { } while (0)
L
Linus Torvalds 已提交
332 333
#endif /* CONFIG_HIGHMEM */

/*
 * Base pte flags for kernel mappings; pagetable_init() ORs in
 * _PAGE_GLOBAL when the CPU supports PGE.
 */
pteval_t __PAGE_KERNEL = _PAGE_KERNEL;
EXPORT_SYMBOL(__PAGE_KERNEL);

pteval_t __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;

339
/*
 * Native hook run before pagetable_init(): trim the boot-time page
 * table so it maps nothing beyond the end of low physical memory.
 */
void __init native_pagetable_setup_start(pgd_t *base)
{
	unsigned long pfn, va;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * Remove any mappings which extend past the end of physical
	 * memory from the boot time page table:
	 */
	for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
		va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
		pgd = base + pgd_index(va);
		/* The boot mappings are contiguous: the first hole ends the scan. */
		if (!pgd_present(*pgd))
			break;

		pud = pud_offset(pgd, va);
		pmd = pmd_offset(pud, va);
		if (!pmd_present(*pmd))
			break;

		pte = pte_offset_kernel(pmd, va);
		if (!pte_present(*pte))
			break;

		pte_clear(NULL, va, pte);
	}
	/* Register the boot pagetable page with the paravirt layer. */
	paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
}

/* Post-pagetable-init hook: nothing to do on native hardware. */
void __init native_pagetable_setup_done(pgd_t *base)
{
}

/*
 * Build a proper pagetable for the kernel mappings.  Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
381 382
 * constructed in arch/x86/kernel/head_32.S.  The root of the
 * pagetable will be swapper_pg_dir.
383 384 385 386 387 388 389 390 391 392 393
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir.  In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
I
Ingo Molnar 已提交
394
/* Build the kernel page tables in swapper_pg_dir (see comment above). */
static void __init pagetable_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;
	unsigned long vaddr, end;

	paravirt_pagetable_setup_start(pgd_base);

	/* Enable PSE if available */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		/* Global kernel mappings survive TLB flushes on context switch. */
		__PAGE_KERNEL |= _PAGE_GLOBAL;
		__PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
	}

	kernel_physical_mapping_init(pgd_base);
	remap_numa_kva();

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	early_ioremap_clear();
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, pgd_base);
	early_ioremap_reset();

	permanent_kmaps_init(pgd_base);

	paravirt_pagetable_setup_done(pgd_base);
}

430
#ifdef CONFIG_ACPI_SLEEP
L
Linus Torvalds 已提交
431
/*
432
 * ACPI suspend needs this for resume, because things like the intel-agp
L
Linus Torvalds 已提交
433 434
 * driver might have split up a kernel 4MB mapping.
 */
435
char swsusp_pg_dir[PAGE_SIZE]
I
Ingo Molnar 已提交
436
	__attribute__ ((aligned(PAGE_SIZE)));
L
Linus Torvalds 已提交
437 438 439 440 441

static inline void save_pg_dir(void)
{
	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
442
#else /* !CONFIG_ACPI_SLEEP */
L
Linus Torvalds 已提交
443 444 445
static inline void save_pg_dir(void)
{
}
446
#endif /* !CONFIG_ACPI_SLEEP */
L
Linus Torvalds 已提交
447

I
Ingo Molnar 已提交
448
/* Remove the low (user-range) boot mappings from swapper_pg_dir. */
void zap_low_mappings(void)
{
	int i;

	/* Preserve a copy first — ACPI resume restores it (see swsusp_pg_dir). */
	save_pg_dir();

	/*
	 * Zap initial low-memory mappings.
	 *
	 * Note that "pgd_clear()" doesn't do it for
	 * us, because pgd_clear() is a no-op on i386.
	 */
	for (i = 0; i < USER_PTRS_PER_PGD; i++) {
#ifdef CONFIG_X86_PAE
		/*
		 * NOTE(review): under PAE the entry points at the zero page
		 * instead of being cleared; the "1 +" looks like the present
		 * bit — confirm against the PAE pgd format.
		 */
		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
		set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
	}
	flush_tlb_all();
}

I
Ingo Molnar 已提交
470
/* Non-zero once NX (Execute Disable) protection has been enabled. */
int nx_enabled;

/* Supported pte bits; _PAGE_NX stays masked off until set_nx() proves it. */
pteval_t __supported_pte_mask __read_mostly = ~_PAGE_NX;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

#ifdef CONFIG_X86_PAE

/* Set by "noexec=off" on the kernel command line. */
static int disable_nx __initdata;
L
Linus Torvalds 已提交
478 479 480 481 482 483 484 485 486

/*
 * noexec = on|off
 *
 * Control non executable mappings.
 *
 * on      Enable
 * off     Disable
 */
487
static int __init noexec_setup(char *str)
L
Linus Torvalds 已提交
488
{
489 490 491 492 493
	if (!str || !strcmp(str, "on")) {
		if (cpu_has_nx) {
			__supported_pte_mask |= _PAGE_NX;
			disable_nx = 0;
		}
I
Ingo Molnar 已提交
494 495 496 497 498 499 500 501
	} else {
		if (!strcmp(str, "off")) {
			disable_nx = 1;
			__supported_pte_mask &= ~_PAGE_NX;
		} else {
			return -EINVAL;
		}
	}
502 503

	return 0;
L
Linus Torvalds 已提交
504
}
505
early_param("noexec", noexec_setup);
L
Linus Torvalds 已提交
506 507 508 509 510 511 512

/*
 * Probe the CPU's extended feature flags and, if the NX bit is
 * supported and not disabled on the command line, enable it in EFER
 * and allow _PAGE_NX in ptes.
 */
static void __init set_nx(void)
{
	unsigned int v[4], l, h;

	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);

		/* EDX bit 20 of CPUID 0x80000001 is the NX feature flag. */
		if ((v[3] & (1 << 20)) && !disable_nx) {
			rdmsr(MSR_EFER, l, h);
			l |= EFER_NX;
			wrmsr(MSR_EFER, l, h);
			nx_enabled = 1;
			__supported_pte_mask |= _PAGE_NX;
		}
	}
}
#endif

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routines also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
#ifdef CONFIG_X86_PAE
	set_nx();
	if (nx_enabled)
		printk(KERN_INFO "NX (Execute Disable) protection: active\n");
#endif
	pagetable_init();

	/* Switch to the freshly built swapper_pg_dir ... */
	load_cr3(swapper_pg_dir);

	__flush_tlb_all();

	/* ... and cache the first kmap pte now that the fixmaps exist. */
	kmap_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
 * used to involve black magic jumps to work around some nasty CPU bugs,
 * but fortunately the switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
	printk(KERN_INFO
  "Checking if this processor honours the WP bit even in supervisor mode...");

	/* Any page-aligned address will do, the test is non-destructive */
	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
	boot_cpu_data.wp_works_ok = do_test_wp_bit();
	clear_fixmap(FIX_WP_TEST);

	if (!boot_cpu_data.wp_works_ok) {
		printk(KERN_CONT "No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
		/* Kernel was built assuming working WP — cannot continue. */
		panic(
  "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
	} else {
		printk(KERN_CONT "Ok.\n");
	}
}

I
Ingo Molnar 已提交
575
/* /proc/kcore descriptors for direct-mapped lowmem and the vmalloc area. */
static struct kcore_list kcore_mem, kcore_vmalloc;
L
Linus Torvalds 已提交
576 577 578 579

/*
 * Final memory bring-up: release bootmem to the buddy allocator, free
 * highmem pages, print the memory summary and virtual-layout table,
 * sanity-check the layout, run the WP test, and (on UP) zap the low
 * identity mappings.
 */
void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;
	int tmp, bad_ppro;

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif
	bad_ppro = ppro_with_ram_bug();

#ifdef CONFIG_HIGHMEM
	/* check that fixmap and pkmap do not overlap */
	if (PKMAP_BASE + LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
		printk(KERN_ERR
			"fixmap and kmap areas overlap - this will crash\n");
		printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
				PKMAP_BASE, PKMAP_BASE + LAST_PKMAP*PAGE_SIZE,
				FIXADDR_START);
		BUG();
	}
#endif
	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages:
		 */
		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
			reservedpages++;

	set_highmem_pages_init(bad_ppro);

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Register lowmem and vmalloc ranges with /proc/kcore: */
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
			"%dk reserved, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
	       );

#if 1 /* double-sanity-check paranoia */
	/* The format string and the argument list below must stay in sync. */
	printk(KERN_INFO "virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)__va(0), (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE	> FIXADDR_START);
	BUG_ON(VMALLOC_END				> PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_START				> VMALLOC_END);
	BUG_ON((unsigned long)high_memory		> VMALLOC_START);
#endif /* double-sanity-check paranoia */

	/* wp_works_ok < 0 means "not yet determined": */
	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();

	cpa_init();

	/*
	 * Subtle. SMP is doing it's boot stuff late (because it has to
	 * fork idle threads) - but it also needs low mappings for the
	 * protected-mode entry to work. We zap these entries only after
	 * the WP-bit has been tested.
	 */
#ifndef CONFIG_SMP
	zap_low_mappings();
#endif
}

689
#ifdef CONFIG_MEMORY_HOTPLUG
690
/*
 * Memory-hotplug entry point: add @size bytes of memory at physical
 * address @start to node @nid.  New memory always goes into
 * ZONE_HIGHMEM here (see the non-NUMA note above online_page()).
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata = NODE_DATA(nid);
	struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(zone, start_pfn, nr_pages);
}
699
#endif
700

L
Linus Torvalds 已提交
701 702 703 704
/*
 * This function cannot be __init, since exceptions don't work in that
 * section.  Put this after the callers, so that it cannot be inlined.
 */
static noinline int do_test_wp_bit(void)
{
	char tmp_reg;
	int flag;

	/*
	 * Write back to the read-only FIX_WP_TEST page.  If WP is honoured
	 * the write at label 1 faults and the exception table entry resumes
	 * at label 2, skipping the xorl — so flag stays 1.  If the write
	 * goes through, flag is cleared to 0.
	 */
	__asm__ __volatile__(
		"	movb %0, %1	\n"
		"1:	movb %1, %0	\n"
		"	xorl %2, %2	\n"
		"2:			\n"
		_ASM_EXTABLE(1b,2b)
		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
		 "=q" (tmp_reg),
		 "=r" (flag)
		:"2" (1)
		:"memory");

	/* 1 = write faulted (WP works), 0 = write succeeded (WP broken). */
	return flag;
}

725
#ifdef CONFIG_DEBUG_RODATA
726 727
/* Known value that rodata_test() later checks is truly write-protected. */
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

/*
 * Write-protect the kernel text and the .rodata section, optionally
 * exercising the change_page_attr machinery (CONFIG_CPA_DEBUG) by
 * flipping the protections back and forth.
 */
void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel text: %luk\n",
		size >> 10);

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
		start, start+size);
	set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif
	/* Now the rodata region, from the end of text to __end_rodata: */
	start += size;
	size = (unsigned long)__end_rodata - start;
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		size >> 10);
	rodata_test();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
}
#endif

G
Gerd Hoffmann 已提交
763 764
/*
 * Free the page range [begin, end) that held init-only data, poisoning
 * each page first.  With CONFIG_DEBUG_PAGEALLOC the pages are unmapped
 * instead of freed so stale accesses fault.
 */
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
	/*
	 * If debugging page accesses then do not free this memory but
	 * mark them not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
		begin, PAGE_ALIGN(end));
	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
	unsigned long addr;

	/*
	 * We just marked the kernel text read only above, now that
	 * we are going to free part of that, we need to make that
	 * writeable first.
	 */
	set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		/* Poison so a use-after-free of init data is recognizable. */
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
#endif
}

void free_initmem(void)
{
	free_init_pages("unused kernel memory",
798 799
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
G
Gerd Hoffmann 已提交
800
}
801

L
Linus Torvalds 已提交
802 803 804
#ifdef CONFIG_BLK_DEV_INITRD
/* Release the initrd image pages back to the page allocator. */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif