/* arch/x86/mm/init_32.c — i386 low-level memory initialization */
/*
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
24
#include <linux/pfn.h>
25
#include <linux/poison.h>
L
Linus Torvalds 已提交
26 27 28
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
29
#include <linux/memory_hotplug.h>
30
#include <linux/initrd.h>
31
#include <linux/cpumask.h>
L
Linus Torvalds 已提交
32

33
#include <asm/asm.h>
L
Linus Torvalds 已提交
34 35 36 37 38 39 40 41
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
I
Ingo Molnar 已提交
42
#include <asm/bugs.h>
L
Linus Torvalds 已提交
43 44
#include <asm/tlb.h>
#include <asm/tlbflush.h>
45
#include <asm/pgalloc.h>
L
Linus Torvalds 已提交
46
#include <asm/sections.h>
47
#include <asm/paravirt.h>
48
#include <asm/setup.h>
49
#include <asm/cacheflush.h>
L
Linus Torvalds 已提交
50 51 52

/* Bytes reserved for the vmalloc area (128 MB by default). */
unsigned int __VMALLOC_RESERVE = 128 << 20;

/* Highest page frame number mapped into the kernel direct mapping so far. */
unsigned long max_pfn_mapped;

/* Per-CPU TLB gather state (see asm/tlb.h). */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/* PFN bounds of the highmem region, set up during early boot. */
unsigned long highstart_pfn, highend_pfn;

/* Defined after its callers so it cannot be inlined (see comment there). */
static noinline int do_test_wp_bit(void);
L
Linus Torvalds 已提交
59 60 61 62 63 64 65 66 67 68

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	/* With PAE a real pmd level exists: allocate it on first use. */
	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
		pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);

		paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
		pud = pud_offset(pgd, 0);
		/* Sanity: the entry we installed must be what we read back. */
		BUG_ON(pmd_table != pmd_offset(pud, 0));
	}
#endif
	/* Non-PAE: pud/pmd are folded, this just returns the pgd slot. */
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
		pte_t *page_table = NULL;

#ifdef CONFIG_DEBUG_PAGEALLOC
		/* Prefer a non-low page when page-alloc debugging is on. */
		page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
		if (!page_table) {
			page_table =
				(pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
		}

		paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		/* Sanity: the table we installed must be what we read back. */
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguous on the physical space
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx;
	unsigned long vaddr;
	pgd_t *pgd;
	pmd_t *pmd;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		pmd = pmd + pmd_index(vaddr);
		/* pmd_idx carries over mid-table on the first pgd only. */
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
							pmd++, pmd_idx++) {
			one_page_table_init(pmd);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}

/*
 * Return 1 if @addr lies inside the kernel image (PAGE_OFFSET up to and
 * including __init_end), 0 otherwise. Used to decide executable mappings.
 */
static inline int is_kernel_text(unsigned long addr)
{
	return (addr >= PAGE_OFFSET &&
		addr <= (unsigned long)__init_end) ? 1 : 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx, pte_ofs;
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		if (pfn >= max_low_pfn)
			continue;

		for (pmd_idx = 0;
		     pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn;
		     pmd++, pmd_idx++) {
			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

			/*
			 * Map with big pages if possible, otherwise
			 * create normal page tables:
			 *
			 * Don't use a large page for the first 2/4MB of memory
			 * because there are often fixed size MTRRs in there
			 * and overlapping MTRRs into large pages can cause
			 * slowdowns.
			 */
			if (cpu_has_pse && !(pgd_idx == 0 && pmd_idx == 0)) {
				unsigned int addr2;
				pgprot_t prot = PAGE_KERNEL_LARGE;

				/* Last byte covered by this large page. */
				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
					PAGE_OFFSET + PAGE_SIZE-1;

				/* Keep kernel text executable. */
				if (is_kernel_text(addr) ||
				    is_kernel_text(addr2))
					prot = PAGE_KERNEL_LARGE_EXEC;

				set_pmd(pmd, pfn_pmd(pfn, prot));

				pfn += PTRS_PER_PTE;
				max_pfn_mapped = pfn;
				continue;
			}
			pte = one_page_table_init(pmd);

			/* 4K mappings: one pte per page frame. */
			for (pte_ofs = 0;
			     pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
				pgprot_t prot = PAGE_KERNEL;

				if (is_kernel_text(addr))
					prot = PAGE_KERNEL_EXEC;

				set_pte(pte, pfn_pte(pfn, prot));
			}
			max_pfn_mapped = pfn;
		}
	}
}

/*
 * Return 1 for page frames in the range 0x70000-0x7003F, which trigger
 * the Pentium Pro erratum handled by ppro_with_ram_bug(); 0 otherwise.
 */
static inline int page_kills_ppro(unsigned long pagenr)
{
	return (pagenr >= 0x70000 && pagenr <= 0x7003F) ? 1 : 0;
}

230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248
/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of ram because that area
 * contains bios code and data regions used by X and dosemu and similar apps.
 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
 * mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	/*
	 * The first megabyte is page frames 0..255. The previous test
	 * ("<= 256") also exposed the frame at exactly 1 MB, one page
	 * past the BIOS/legacy area the comment above describes.
	 */
	if (pagenr < 256)
		return 1;
	/* Non-RAM frames (PCI mmio, ACPI/BIOS data) are allowed too. */
	if (!page_is_ram(pagenr))
		return 1;
	return 0;
}

L
Linus Torvalds 已提交
249 250 251 252
#ifdef CONFIG_HIGHMEM
/* Cached pte of the first kmap slot and the protection used for kmaps. */
pte_t *kmap_pte;
pgprot_t kmap_prot;

/* Walk the kernel page tables and return the pte that maps @vaddr. */
static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
L
Linus Torvalds 已提交
258 259 260 261 262

/* Initialize the atomic-kmap machinery: cache the first kmap pte. */
static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/*
	 * Cache the first kmap pte:
	 */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}

/*
 * Set up the page tables backing the permanent kmap (pkmap) area and
 * cache the pte page in pkmap_page_table.
 */
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	unsigned long vaddr;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}

290
/*
 * Hand one highmem page to the page allocator, unless it is not RAM or
 * falls in the Pentium-Pro erratum range on affected CPUs (@bad_ppro),
 * in which case it is kept reserved.
 */
void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		totalhigh_pages++;
	} else
		SetPageReserved(page);
}

I
Ingo Molnar 已提交
301
#ifndef CONFIG_NUMA
/* Free all valid highmem pages into the allocator (non-NUMA case). */
static void __init set_highmem_pages_init(int bad_ppro)
{
	int pfn;

	for (pfn = highstart_pfn; pfn < highend_pfn; pfn++) {
		/*
		 * Holes under sparsemem might not have mem_map[]:
		 */
		if (pfn_valid(pfn))
			add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
	}
	totalram_pages += totalhigh_pages;
}
#endif /* !CONFIG_NUMA */
L
Linus Torvalds 已提交
316 317

#else
/* No highmem: all the kmap machinery becomes a no-op. */
# define kmap_init()				do { } while (0)
# define permanent_kmaps_init(pgd_base)		do { } while (0)
# define set_highmem_pages_init(bad_ppro)	do { } while (0)
#endif /* CONFIG_HIGHMEM */

323
/*
 * Baseline protections for kernel mappings; _PAGE_GLOBAL is OR-ed in by
 * pagetable_init() when the CPU supports PGE.
 */
pteval_t __PAGE_KERNEL = _PAGE_KERNEL;
EXPORT_SYMBOL(__PAGE_KERNEL);

pteval_t __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
L
Linus Torvalds 已提交
327

328
/*
 * Native (non-paravirt) hook run before pagetable_init(): trim the boot
 * page table so it does not map anything beyond max_low_pfn.
 */
void __init native_pagetable_setup_start(pgd_t *base)
{
	unsigned long pfn, va;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * Remove any mappings which extend past the end of physical
	 * memory from the boot time page table:
	 */
	for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
		va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
		pgd = base + pgd_index(va);
		if (!pgd_present(*pgd))
			break;

		pud = pud_offset(pgd, va);
		pmd = pmd_offset(pud, va);
		if (!pmd_present(*pmd))
			break;

		pte = pte_offset_kernel(pmd, va);
		if (!pte_present(*pte))
			break;

		pte_clear(NULL, va, pte);
	}
	paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
}

/* Native post-pagetable_init() hook: nothing to do on bare hardware. */
void __init native_pagetable_setup_done(pgd_t *base)
{
}

/*
 * Build a proper pagetable for the kernel mappings.  Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S.  The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir.  In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
static void __init pagetable_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;
	unsigned long vaddr, end;

	paravirt_pagetable_setup_start(pgd_base);

	/* Enable PSE if available */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__PAGE_KERNEL |= _PAGE_GLOBAL;
		__PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
	}

	kernel_physical_mapping_init(pgd_base);
	remap_numa_kva();

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	early_ioremap_clear();
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, pgd_base);
	early_ioremap_reset();

	permanent_kmaps_init(pgd_base);

	paravirt_pagetable_setup_done(pgd_base);
}

419
#ifdef CONFIG_ACPI_SLEEP
/*
 * ACPI suspend needs this for resume, because things like the intel-agp
 * driver might have split up a kernel 4MB mapping.
 */
char swsusp_pg_dir[PAGE_SIZE]
	__attribute__ ((aligned(PAGE_SIZE)));

/* Snapshot swapper_pg_dir so resume can restore the low mappings. */
static inline void save_pg_dir(void)
{
	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else /* !CONFIG_ACPI_SLEEP */
static inline void save_pg_dir(void)
{
}
#endif /* !CONFIG_ACPI_SLEEP */
L
Linus Torvalds 已提交
436

I
Ingo Molnar 已提交
437
/*
 * Remove the identity-mapped low-memory pgd entries left over from boot
 * (after saving them for ACPI resume) and flush the TLB.
 */
void zap_low_mappings(void)
{
	int i;

	save_pg_dir();

	/*
	 * Zap initial low-memory mappings.
	 *
	 * Note that "pgd_clear()" doesn't do it for
	 * us, because pgd_clear() is a no-op on i386.
	 */
	for (i = 0; i < KERNEL_PGD_BOUNDARY; i++) {
#ifdef CONFIG_X86_PAE
		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
		set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
	}
	flush_tlb_all();
}

I
Ingo Molnar 已提交
459
/* Set to 1 by set_nx() once the NX (no-execute) feature is enabled. */
int nx_enabled;

/* Mask of pte bits the CPU supports; _PAGE_NX is added when NX works. */
pteval_t __supported_pte_mask __read_mostly = ~_PAGE_NX;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

J
Jan Beulich 已提交
464 465
#ifdef CONFIG_X86_PAE

/* Set by "noexec=off"; checked by set_nx() before enabling EFER.NX. */
static int disable_nx __initdata;

/*
 * noexec = on|off
 *
 * Control non executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
	/* No argument behaves like "on". */
	if (!str || !strcmp(str, "on")) {
		if (cpu_has_nx) {
			__supported_pte_mask |= _PAGE_NX;
			disable_nx = 0;
		}
	} else {
		if (!strcmp(str, "off")) {
			disable_nx = 1;
			__supported_pte_mask &= ~_PAGE_NX;
		} else {
			return -EINVAL;
		}
	}

	return 0;
}
early_param("noexec", noexec_setup);
L
Linus Torvalds 已提交
495 496 497 498 499 500 501

/*
 * Probe CPUID for the NX feature (bit 20 of extended leaf 0x80000001
 * EDX) and, unless "noexec=off" was given, enable it via EFER.NX.
 */
static void __init set_nx(void)
{
	unsigned int v[4], l, h;

	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);

		if ((v[3] & (1 << 20)) && !disable_nx) {
			rdmsr(MSR_EFER, l, h);
			l |= EFER_NX;
			wrmsr(MSR_EFER, l, h);
			nx_enabled = 1;
			__supported_pte_mask |= _PAGE_NX;
		}
	}
}
#endif

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routines also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
#ifdef CONFIG_X86_PAE
	set_nx();
	if (nx_enabled)
		printk(KERN_INFO "NX (Execute Disable) protection: active\n");
#endif
	pagetable_init();

	load_cr3(swapper_pg_dir);

	__flush_tlb_all();

	kmap_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's. All 586+'s are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
	printk(KERN_INFO
  "Checking if this processor honours the WP bit even in supervisor mode...");

	/* Any page-aligned address will do, the test is non-destructive */
	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
	boot_cpu_data.wp_works_ok = do_test_wp_bit();
	clear_fixmap(FIX_WP_TEST);

	if (!boot_cpu_data.wp_works_ok) {
		printk(KERN_CONT "No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
		panic(
  "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
	} else {
		printk(KERN_CONT "Ok.\n");
	}
}

I
Ingo Molnar 已提交
564
/* /proc/kcore entries for low memory and the vmalloc area. */
static struct kcore_list kcore_mem, kcore_vmalloc;

/*
 * Late memory init: sanity-check the layout, hand all bootmem to the
 * page allocator, register kcore ranges, print the memory summary and
 * virtual layout, test the WP bit, and (on UP) zap the low mappings.
 */
void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;
	int tmp, bad_ppro;

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif
	bad_ppro = ppro_with_ram_bug();

#ifdef CONFIG_HIGHMEM
	/* check that fixmap and pkmap do not overlap */
	if (PKMAP_BASE + LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
		printk(KERN_ERR
			"fixmap and kmap areas overlap - this will crash\n");
		printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
				PKMAP_BASE, PKMAP_BASE + LAST_PKMAP*PAGE_SIZE,
				FIXADDR_START);
		BUG();
	}
#endif
	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages:
		 */
		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
			reservedpages++;

	set_highmem_pages_init(bad_ppro);

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
			"%dk reserved, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
	       );

#if 1 /* double-sanity-check paranoia */
	printk(KERN_INFO "virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)__va(0), (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE	> FIXADDR_START);
	BUG_ON(VMALLOC_END				> PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_START				> VMALLOC_END);
	BUG_ON((unsigned long)high_memory		> VMALLOC_START);
#endif /* double-sanity-check paranoia */

	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();

	cpa_init();

	/*
	 * Subtle. SMP is doing it's boot stuff late (because it has to
	 * fork idle threads) - but it also needs low mappings for the
	 * protected-mode entry to work. We zap these entries only after
	 * the WP-bit has been tested.
	 */
#ifndef CONFIG_SMP
	zap_low_mappings();
#endif
}

678
#ifdef CONFIG_MEMORY_HOTPLUG
679
int arch_add_memory(int nid, u64 start, u64 size)
680
{
681
	struct pglist_data *pgdata = NODE_DATA(nid);
682
	struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
683 684 685 686 687
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(zone, start_pfn, nr_pages);
}
688
#endif
689

L
Linus Torvalds 已提交
690 691 692 693
/*
 * This function cannot be __init, since exceptions don't work in that
 * section.  Put this after the callers, so that it cannot be inlined.
 */
static noinline int do_test_wp_bit(void)
{
	char tmp_reg;
	int flag;

	/*
	 * Write to the read-only FIX_WP_TEST page; if WP is honoured the
	 * faulting store is skipped via the exception table and flag
	 * stays 1, otherwise the xorl clears it to 0.
	 */
	__asm__ __volatile__(
		"	movb %0, %1	\n"
		"1:	movb %1, %0	\n"
		"	xorl %2, %2	\n"
		"2:			\n"
		_ASM_EXTABLE(1b,2b)
		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
		 "=q" (tmp_reg),
		 "=r" (flag)
		:"2" (1)
		:"memory");

	return flag;
}

714
#ifdef CONFIG_DEBUG_RODATA
715 716
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);
717 718 719

void mark_rodata_ro(void)
{
720 721
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;
722

723 724 725
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel text: %luk\n",
		size >> 10);
726 727

#ifdef CONFIG_CPA_DEBUG
728 729 730
	printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
		start, start+size);
	set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);
731

732 733
	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
734
#endif
735 736
	start += size;
	size = (unsigned long)__end_rodata - start;
737
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
738 739
	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		size >> 10);
740
	rodata_test();
741

742
#ifdef CONFIG_CPA_DEBUG
743
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
744
	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
745

746
	printk(KERN_INFO "Testing CPA: write protecting again\n");
747
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
748
#endif
749 750 751
}
#endif

G
Gerd Hoffmann 已提交
752 753
/*
 * Release the [begin, end) range of init memory back to the allocator,
 * poisoning it first; with CONFIG_DEBUG_PAGEALLOC the range is instead
 * unmapped so stray accesses fault.
 */
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
	/*
	 * If debugging page accesses then do not free this memory but
	 * mark them not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
		begin, PAGE_ALIGN(end));
	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
	unsigned long addr;

	/*
	 * We just marked the kernel text read only above, now that
	 * we are going to free part of that, we need to make that
	 * writeable first.
	 */
	set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		/* Poison so use-after-free of init memory is detectable. */
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
#endif
}

void free_initmem(void)
{
	free_init_pages("unused kernel memory",
787 788
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
G
Gerd Hoffmann 已提交
789
}
790

L
Linus Torvalds 已提交
791 792 793
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
794
	free_init_pages("initrd memory", start, end);
L
Linus Torvalds 已提交
795 796
}
#endif