/* arch/x86/mm/init_32.c */
/*
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
24
#include <linux/pfn.h>
25
#include <linux/poison.h>
L
Linus Torvalds 已提交
26 27 28
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
29
#include <linux/memory_hotplug.h>
30
#include <linux/initrd.h>
31
#include <linux/cpumask.h>
L
Linus Torvalds 已提交
32

33
#include <asm/asm.h>
L
Linus Torvalds 已提交
34 35 36 37 38 39 40 41
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
I
Ingo Molnar 已提交
42
#include <asm/bugs.h>
L
Linus Torvalds 已提交
43 44
#include <asm/tlb.h>
#include <asm/tlbflush.h>
45
#include <asm/pgalloc.h>
L
Linus Torvalds 已提交
46
#include <asm/sections.h>
47
#include <asm/paravirt.h>
48
#include <asm/setup.h>
49
#include <asm/cacheflush.h>
L
Linus Torvalds 已提交
50 51 52

/* Size of the vmalloc reservation at the top of lowmem: 128 MB */
unsigned int __VMALLOC_RESERVE = 128 << 20;

/* Highest pfn mapped so far by kernel_physical_mapping_init() below */
unsigned long max_pfn_mapped;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/* Bounds of the highmem pfn range; NOTE(review): presumably filled in by
 * the memory setup code — not visible in this file, confirm at callers. */
unsigned long highstart_pfn, highend_pfn;

/* Forward declaration: defined (non-__init) at the bottom of this file */
static noinline int do_test_wp_bit(void);
L
Linus Torvalds 已提交
59 60 61 62 63 64 65 66 67 68

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	/* Under PAE the pmd level is real: allocate it if not yet present */
	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
		pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);

		paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
		pud = pud_offset(pgd, 0);
		/* Sanity: the entry we installed must be the one we read back */
		BUG_ON(pmd_table != pmd_offset(pud, 0));
	}
#endif
	/* Non-PAE: pud/pmd are folded, so this just returns the pgd slot */
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
		pte_t *page_table = NULL;

#ifdef CONFIG_DEBUG_PAGEALLOC
		/* Debug builds may take the pte page from anywhere ... */
		page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
		/* ... otherwise (or on failure) fall back to low bootmem */
		if (!page_table) {
			page_table =
				(pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
		}

		paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		/* The freshly installed table must be what the walk returns */
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguous on the physical space
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx;
	unsigned long vaddr;
	pgd_t *pgd;
	pmd_t *pmd;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		pmd = pmd + pmd_index(vaddr);
		/* Fill pte tables until the end of this pgd entry (or range) */
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
							pmd++, pmd_idx++) {
			one_page_table_init(pmd);

			vaddr += PMD_SIZE;
		}
		/* Subsequent pgd entries start at their first pmd slot */
		pmd_idx = 0;
	}
}

/*
 * Return 1 if @addr lies within the kernel image — from the start of
 * lowmem (PAGE_OFFSET) up to and including the end of the init
 * sections — and 0 otherwise.
 */
static inline int is_kernel_text(unsigned long addr)
{
	if (addr < PAGE_OFFSET)
		return 0;
	if (addr > (unsigned long)__init_end)
		return 0;
	return 1;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx, pte_ofs;
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		/* Past end of lowmem: still create the md tables, map nothing */
		if (pfn >= max_low_pfn)
			continue;

		for (pmd_idx = 0;
		     pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn;
		     pmd++, pmd_idx++) {
			/* Virtual address this pfn maps to (32-bit here) */
			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

			/*
			 * Map with big pages if possible, otherwise
			 * create normal page tables:
			 *
			 * Don't use a large page for the first 2/4MB of memory
			 * because there are often fixed size MTRRs in there
			 * and overlapping MTRRs into large pages can cause
			 * slowdowns.
			 */
			if (cpu_has_pse && !(pgd_idx == 0 && pmd_idx == 0)) {
				unsigned int addr2;
				pgprot_t prot = PAGE_KERNEL_LARGE;

				/* Last byte covered by this large page */
				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
					PAGE_OFFSET + PAGE_SIZE-1;

				/* Large pages overlapping kernel text must
				 * remain executable */
				if (is_kernel_text(addr) ||
				    is_kernel_text(addr2))
					prot = PAGE_KERNEL_LARGE_EXEC;

				set_pmd(pmd, pfn_pmd(pfn, prot));

				pfn += PTRS_PER_PTE;
				max_pfn_mapped = pfn;
				continue;
			}
			pte = one_page_table_init(pmd);

			/* 4k mappings for this pmd's worth of pfns */
			for (pte_ofs = 0;
			     pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
				pgprot_t prot = PAGE_KERNEL;

				if (is_kernel_text(addr))
					prot = PAGE_KERNEL_EXEC;

				set_pte(pte, pfn_pte(pfn, prot));
			}
			max_pfn_mapped = pfn;
		}
	}
}

/*
 * Pentium Pro RAM erratum: pfns 0x70000..0x7003F (the region at physical
 * address 0x70000000 — assuming 4k pages; see ppro_with_ram_bug() callers)
 * must stay reserved. Return 1 when @pagenr falls in that window.
 */
static inline int page_kills_ppro(unsigned long pagenr)
{
	return (pagenr >= 0x70000 && pagenr <= 0x7003F) ? 1 : 0;
}

230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248
/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of ram because that area
 * contains bios code and data regions used by X and dosemu and similar apps.
 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
 * mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	/*
	 * First megabyte is pfns 0..255 (256 pages of 4k).  The previous
	 * "pagenr <= 256" was off by one and also exposed pfn 256.
	 */
	if (pagenr < 256)
		return 1;
	/* Anything that is not kernel RAM (mmio, ACPI, BIOS) is fair game */
	if (!page_is_ram(pagenr))
		return 1;
	return 0;
}

L
Linus Torvalds 已提交
249 250 251 252
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

I
Ingo Molnar 已提交
253 254 255 256 257
static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
L
Linus Torvalds 已提交
258 259 260 261 262

/* Initialize the (atomic) kmap machinery: cache the first kmap pte and
 * the protection bits used for kmap mappings. */
static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/*
	 * Cache the first kmap pte:
	 */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}

/* Set up the page tables backing the permanent kmap (pkmap) area and
 * cache a pointer to its first pte table in pkmap_page_table. */
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	unsigned long vaddr;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	vaddr = PKMAP_BASE;
	/* Allocate any missing page tables covering the whole pkmap window */
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}

290 291
/*
 * Give one highmem page to the page allocator — unless the machine has
 * the PPro RAM erratum and this pfn falls in the affected window, in
 * which case the page stays reserved.
 */
static void __init
add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
	if (bad_ppro && page_kills_ppro(pfn)) {
		SetPageReserved(page);
		return;
	}

	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalhigh_pages++;
}

302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347
/* Argument bundle for add_highpages_work_fn(), passed as the opaque
 * pointer through work_with_active_regions(). */
struct add_highpages_data {
	unsigned long start_pfn;	/* first pfn of the requested range */
	unsigned long end_pfn;		/* end of range (exclusive) */
	int bad_ppro;			/* CPU has the PPro RAM erratum */
};

/*
 * Per-active-region worker: free every valid highmem page in the
 * intersection of [start_pfn, end_pfn) and the range requested via
 * @datax (a struct add_highpages_data).
 */
static void __init add_highpages_work_fn(unsigned long start_pfn,
					 unsigned long end_pfn, void *datax)
{
	/* unsigned long, not int: pfns are unsigned long everywhere else and
	 * the loop bound comparison below would otherwise mix signedness */
	unsigned long node_pfn;
	struct page *page;
	unsigned long final_start_pfn, final_end_pfn;
	struct add_highpages_data *data;
	int bad_ppro;

	data = (struct add_highpages_data *)datax;
	bad_ppro = data->bad_ppro;

	/* Clamp the active region to the caller's requested range */
	final_start_pfn = max(start_pfn, data->start_pfn);
	final_end_pfn = min(end_pfn, data->end_pfn);
	if (final_start_pfn >= final_end_pfn)
		return;

	for (node_pfn = final_start_pfn; node_pfn < final_end_pfn;
	     node_pfn++) {
		if (!pfn_valid(node_pfn))
			continue;
		page = pfn_to_page(node_pfn);
		add_one_highpage_init(page, node_pfn, bad_ppro);
	}

}

/*
 * Free all valid highmem pages of node @nid that lie both inside
 * [start_pfn, end_pfn) and inside the node's active memory regions.
 */
void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
					      unsigned long end_pfn,
					      int bad_ppro)
{
	struct add_highpages_data data = {
		.start_pfn	= start_pfn,
		.end_pfn	= end_pfn,
		.bad_ppro	= bad_ppro,
	};

	work_with_active_regions(nid, add_highpages_work_fn, &data);
}

I
Ingo Molnar 已提交
348
#ifndef CONFIG_NUMA
/* Non-NUMA version: free every highmem page (node 0) and account them.
 * NOTE(review): NUMA builds appear to get their own variant elsewhere —
 * not visible in this file. */
static void __init set_highmem_pages_init(int bad_ppro)
{
	add_highpages_with_active_regions(0, highstart_pfn, highend_pfn,
						bad_ppro);

	totalram_pages += totalhigh_pages;
}
#endif /* !CONFIG_NUMA */
L
Linus Torvalds 已提交
357 358

#else
/* !CONFIG_HIGHMEM: the kmap machinery compiles away to no-ops */
# define kmap_init()				do { } while (0)
# define permanent_kmaps_init(pgd_base)		do { } while (0)
# define set_highmem_pages_init(bad_ppro)	do { } while (0)
#endif /* CONFIG_HIGHMEM */

364
/*
 * Baseline pte flags for kernel mappings; pagetable_init() ORs in
 * _PAGE_GLOBAL when the CPU supports PGE.
 */
pteval_t __PAGE_KERNEL = _PAGE_KERNEL;
EXPORT_SYMBOL(__PAGE_KERNEL);

pteval_t __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
L
Linus Torvalds 已提交
368

369
/* Native (non-paravirt) hook run before pagetable_init(). */
void __init native_pagetable_setup_start(pgd_t *base)
{
	unsigned long pfn, va;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * Remove any mappings which extend past the end of physical
	 * memory from the boot time page table:
	 */
	for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
		va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
		pgd = base + pgd_index(va);
		if (!pgd_present(*pgd))
			break;

		pud = pud_offset(pgd, va);
		pmd = pmd_offset(pud, va);
		if (!pmd_present(*pmd))
			break;

		pte = pte_offset_kernel(pmd, va);
		if (!pte_present(*pte))
			break;

		pte_clear(NULL, va, pte);
	}
	paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
}

/* Native hook run after pagetable_init(); nothing to do on bare metal. */
void __init native_pagetable_setup_done(pgd_t *base)
{
}

/*
 * Build a proper pagetable for the kernel mappings.  Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S.  The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir.  In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
static void __init pagetable_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;
	unsigned long vaddr, end;

	paravirt_pagetable_setup_start(pgd_base);

	/* Enable PSE if available */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__PAGE_KERNEL |= _PAGE_GLOBAL;
		__PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
	}

	kernel_physical_mapping_init(pgd_base);
	remap_numa_kva();

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	early_ioremap_clear();
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, pgd_base);
	early_ioremap_reset();

	permanent_kmaps_init(pgd_base);

	paravirt_pagetable_setup_done(pgd_base);
}

460
#ifdef CONFIG_ACPI_SLEEP
/*
 * ACPI suspend needs this for resume, because things like the intel-agp
 * driver might have split up a kernel 4MB mapping.
 */
char swsusp_pg_dir[PAGE_SIZE]
	__attribute__ ((aligned(PAGE_SIZE)));

/* Snapshot the live top-level page directory for resume */
static inline void save_pg_dir(void)
{
	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else /* !CONFIG_ACPI_SLEEP */
static inline void save_pg_dir(void)
{
}
#endif /* !CONFIG_ACPI_SLEEP */
L
Linus Torvalds 已提交
477

I
Ingo Molnar 已提交
478
/* Drop the boot-time identity mappings of low memory once they are no
 * longer needed, so user-space NULL-range accesses fault. */
void zap_low_mappings(void)
{
	int i;

	/*
	 * Zap initial low-memory mappings.
	 *
	 * Note that "pgd_clear()" doesn't do it for
	 * us, because pgd_clear() is a no-op on i386.
	 */
	for (i = 0; i < KERNEL_PGD_BOUNDARY; i++) {
#ifdef CONFIG_X86_PAE
		/* PAE: point at the (present) empty zero page instead */
		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
		set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
	}
	flush_tlb_all();
}

I
Ingo Molnar 已提交
498
/* Nonzero once set_nx() has enabled the NX feature in EFER */
int nx_enabled;

/*
 * Mask of pte bits the CPU supports; _PAGE_NX stays masked out until
 * noexec_setup()/set_nx() below establish that NX is available.
 */
pteval_t __supported_pte_mask __read_mostly = ~_PAGE_NX;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

J
Jan Beulich 已提交
503 504
#ifdef CONFIG_X86_PAE

/* Set by "noexec=off"; consulted by set_nx() below */
static int disable_nx __initdata;

/*
 * noexec = on|off
 *
 * Control non executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
	if (!str || !strcmp(str, "on")) {
		/* Default / "noexec=on": allow NX ptes if the CPU has NX */
		if (cpu_has_nx) {
			__supported_pte_mask |= _PAGE_NX;
			disable_nx = 0;
		}
	} else {
		if (!strcmp(str, "off")) {
			disable_nx = 1;
			__supported_pte_mask &= ~_PAGE_NX;
		} else {
			return -EINVAL;
		}
	}

	return 0;
}
early_param("noexec", noexec_setup);
L
Linus Torvalds 已提交
534 535 536 537 538 539 540

/*
 * Probe the NX (execute-disable) capability via the extended CPUID leaf
 * and, unless "noexec=off" was given, turn it on in EFER and allow
 * _PAGE_NX in ptes.
 */
static void __init set_nx(void)
{
	unsigned int v[4], l, h;

	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);

		/* v[3] is EDX; bit 20 advertises NX support */
		if ((v[3] & (1 << 20)) && !disable_nx) {
			rdmsr(MSR_EFER, l, h);
			l |= EFER_NX;
			wrmsr(MSR_EFER, l, h);
			nx_enabled = 1;
			__supported_pte_mask |= _PAGE_NX;
		}
	}
}
#endif

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routines also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
#ifdef CONFIG_X86_PAE
	set_nx();
	if (nx_enabled)
		printk(KERN_INFO "NX (Execute Disable) protection: active\n");
#endif
	pagetable_init();

	/* Switch to the freshly built swapper_pg_dir ... */
	load_cr3(swapper_pg_dir);

	/* ... and invalidate any stale TLB entries */
	__flush_tlb_all();

	kmap_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's. All 586+'s are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
	printk(KERN_INFO
  "Checking if this processor honours the WP bit even in supervisor mode...");

	/* Any page-aligned address will do, the test is non-destructive */
	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
	boot_cpu_data.wp_works_ok = do_test_wp_bit();
	clear_fixmap(FIX_WP_TEST);

	if (!boot_cpu_data.wp_works_ok) {
		printk(KERN_CONT "No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
		panic(
  "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
	} else {
		printk(KERN_CONT "Ok.\n");
	}
}

I
Ingo Molnar 已提交
603
/* /proc/kcore entries for lowmem and the vmalloc area (see mem_init) */
static struct kcore_list kcore_mem, kcore_vmalloc;
L
Linus Torvalds 已提交
604 605 606 607

/*
 * Final memory initialization: release bootmem to the page allocator,
 * account reserved and highmem pages, register /proc/kcore ranges,
 * print the virtual memory layout, run the WP-bit test, and finally
 * zap the boot-time low mappings.
 */
void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;
	int tmp, bad_ppro;

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif
	bad_ppro = ppro_with_ram_bug();

#ifdef CONFIG_HIGHMEM
	/* check that fixmap and pkmap do not overlap */
	if (PKMAP_BASE + LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
		printk(KERN_ERR
			"fixmap and kmap areas overlap - this will crash\n");
		printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
				PKMAP_BASE, PKMAP_BASE + LAST_PKMAP*PAGE_SIZE,
				FIXADDR_START);
		BUG();
	}
#endif
	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages:
		 */
		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
			reservedpages++;

	set_highmem_pages_init(bad_ppro);

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Make lowmem and the vmalloc range visible through /proc/kcore */
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
			"%dk reserved, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
	       );

#if 1 /* double-sanity-check paranoia */
	printk(KERN_INFO "virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)__va(0), (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	/* The layout printed above must also be internally consistent */
#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE	> FIXADDR_START);
	BUG_ON(VMALLOC_END				> PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_START				> VMALLOC_END);
	BUG_ON((unsigned long)high_memory		> VMALLOC_START);
#endif /* double-sanity-check paranoia */

	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();

	cpa_init();
	save_pg_dir();
	zap_low_mappings();
}

709
#ifdef CONFIG_MEMORY_HOTPLUG
710
int arch_add_memory(int nid, u64 start, u64 size)
711
{
712
	struct pglist_data *pgdata = NODE_DATA(nid);
713
	struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
714 715 716 717 718
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(zone, start_pfn, nr_pages);
}
719
#endif
720

L
Linus Torvalds 已提交
721 722 723 724
/*
 * This function cannot be __init, since exceptions don't work in that
 * section.  Put this after the callers, so that it cannot be inlined.
 *
 * Writes through the read-only FIX_WP_TEST fixmap; if WP is honoured the
 * write faults, the exception table entry skips to label 2 and 'flag'
 * keeps its initial value 1.  If WP is broken the xorl clears 'flag'.
 */
static noinline int do_test_wp_bit(void)
{
	char tmp_reg;
	int flag;

	__asm__ __volatile__(
		"	movb %0, %1	\n"
		"1:	movb %1, %0	\n"
		"	xorl %2, %2	\n"
		"2:			\n"
		_ASM_EXTABLE(1b,2b)
		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
		 "=q" (tmp_reg),
		 "=r" (flag)
		:"2" (1)
		:"memory");

	return flag;
}

745
#ifdef CONFIG_DEBUG_RODATA
746 747
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);
748 749 750

void mark_rodata_ro(void)
{
751 752
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;
753

754 755 756
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel text: %luk\n",
		size >> 10);
757 758

#ifdef CONFIG_CPA_DEBUG
759 760 761
	printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
		start, start+size);
	set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);
762

763 764
	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
765
#endif
766 767
	start += size;
	size = (unsigned long)__end_rodata - start;
768
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
769 770
	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		size >> 10);
771
	rodata_test();
772

773
#ifdef CONFIG_CPA_DEBUG
774
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
775
	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
776

777
	printk(KERN_INFO "Testing CPA: write protecting again\n");
778
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
779
#endif
780 781 782
}
#endif

G
Gerd Hoffmann 已提交
783 784
/*
 * Release the init pages in [begin, end) back to the allocator — or,
 * with DEBUG_PAGEALLOC, unmap them instead so any late access to init
 * code/data faults.  @what is only used for the log message.
 */
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
	/*
	 * If debugging page accesses then do not free this memory but
	 * mark them not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
		begin, PAGE_ALIGN(end));
	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
	unsigned long addr;

	/*
	 * We just marked the kernel text read only above, now that
	 * we are going to free part of that, we need to make that
	 * writeable first.
	 */
	set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		/* Poison so stale pointers into init memory are obvious */
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
#endif
}

/* Free the kernel's __init text/data once boot is complete */
void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}
821

L
Linus Torvalds 已提交
822 823 824
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
825
	free_init_pages("initrd memory", start, end);
L
Linus Torvalds 已提交
826 827
}
#endif
828 829 830 831 832 833

/* Thin pass-through to reserve_bootmem().  NOTE(review): presumably
 * exists so shared x86 code has one API on 32- and 64-bit — confirm
 * against the 64-bit implementation. */
int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
				   int flags)
{
	return reserve_bootmem(phys, len, flags);
}