init_32.c 19.8 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
/*
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
24
#include <linux/pfn.h>
25
#include <linux/poison.h>
L
Linus Torvalds 已提交
26 27 28
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
29
#include <linux/memory_hotplug.h>
30
#include <linux/initrd.h>
31
#include <linux/cpumask.h>
L
Linus Torvalds 已提交
32

33
#include <asm/asm.h>
L
Linus Torvalds 已提交
34 35 36 37 38 39 40 41
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
I
Ingo Molnar 已提交
42
#include <asm/bugs.h>
L
Linus Torvalds 已提交
43 44
#include <asm/tlb.h>
#include <asm/tlbflush.h>
45
#include <asm/pgalloc.h>
L
Linus Torvalds 已提交
46
#include <asm/sections.h>
47
#include <asm/paravirt.h>
48
#include <asm/setup.h>
49
#include <asm/cacheflush.h>
L
Linus Torvalds 已提交
50 51 52

unsigned int __VMALLOC_RESERVE = 128 << 20;

53
unsigned long max_pfn_mapped;
54

L
Linus Torvalds 已提交
55 56 57
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

I
Ingo Molnar 已提交
58
static noinline int do_test_wp_bit(void);
L
Linus Torvalds 已提交
59 60 61 62 63 64 65 66 67 68

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	/* Only allocate a new pmd page if this pgd slot is still empty: */
	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
		pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);

		/* Tell paravirt backends about the new pmd page first: */
		paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
		pud = pud_offset(pgd, 0);
		/* Sanity check: the walk must land on the page we installed */
		BUG_ON(pmd_table != pmd_offset(pud, 0));
	}
#endif
	/* Non-PAE: pud/pmd levels are folded, this resolves to the pgd entry */
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	/* Only allocate if the pmd entry does not already point at a table */
	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
		pte_t *page_table = NULL;

#ifdef CONFIG_DEBUG_PAGEALLOC
		/* May fail early in boot; fall back to lowmem below */
		page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
		if (!page_table) {
			page_table =
				(pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
		}

		/* Notify paravirt backends before installing the table */
		paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		/* Sanity check: the walk must resolve to the installed table */
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguous on the physical space
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx;
	unsigned long vaddr;
	pgd_t *pgd;
	pmd_t *pmd;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	/* Walk pgd entries; stop when vaddr reaches the requested end */
	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		pmd = pmd + pmd_index(vaddr);
		/* Populate each missing pte page in this pmd, PMD_SIZE at a time */
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
							pmd++, pmd_idx++) {
			one_page_table_init(pmd);

			vaddr += PMD_SIZE;
		}
		/* Subsequent pgds start at their first pmd slot */
		pmd_idx = 0;
	}
}

/*
 * Return 1 if @addr lies within the kernel image mapping, i.e. between
 * PAGE_OFFSET and the end of the init section, otherwise 0.
 */
static inline int is_kernel_text(unsigned long addr)
{
	int inside = (addr >= PAGE_OFFSET) &&
		     (addr <= (unsigned long)__init_end);

	return inside ? 1 : 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx, pte_ofs;
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	/* Counters for the direct-map page accounting at the end */
	unsigned pages_2m = 0, pages_4k = 0;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		/* Past lowmem: keep allocating pmd pages but map nothing */
		if (pfn >= max_low_pfn)
			continue;

		for (pmd_idx = 0;
		     pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn;
		     pmd++, pmd_idx++) {
			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

			/*
			 * Map with big pages if possible, otherwise
			 * create normal page tables:
			 *
			 * Don't use a large page for the first 2/4MB of memory
			 * because there are often fixed size MTRRs in there
			 * and overlapping MTRRs into large pages can cause
			 * slowdowns.
			 */
			if (cpu_has_pse && !(pgd_idx == 0 && pmd_idx == 0)) {
				unsigned int addr2;
				pgprot_t prot = PAGE_KERNEL_LARGE;

				/* Last byte covered by this large page */
				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
					PAGE_OFFSET + PAGE_SIZE-1;

				/* Keep kernel text executable */
				if (is_kernel_text(addr) ||
				    is_kernel_text(addr2))
					prot = PAGE_KERNEL_LARGE_EXEC;

				pages_2m++;
				set_pmd(pmd, pfn_pmd(pfn, prot));

				pfn += PTRS_PER_PTE;
				max_pfn_mapped = pfn;
				continue;
			}
			pte = one_page_table_init(pmd);

			/* 4k mappings for the remainder of this pmd */
			for (pte_ofs = 0;
			     pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
				pgprot_t prot = PAGE_KERNEL;

				if (is_kernel_text(addr))
					prot = PAGE_KERNEL_EXEC;

				pages_4k++;
				set_pte(pte, pfn_pte(pfn, prot));
			}
			max_pfn_mapped = pfn;
		}
	}
	update_page_count(PG_LEVEL_2M, pages_2m);
	update_page_count(PG_LEVEL_4K, pages_4k);
}

228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246
/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 *
 * On x86, access has to be given to the first megabyte of ram because that area
 * contains bios code and data regions used by X and dosemu and similar apps.
 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
 * mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	/*
	 * Pages 0..255 cover exactly the first megabyte; the original
	 * "<= 256" check was off by one and also exposed pfn 256, the
	 * first page of potentially kernel-owned RAM above 1MB.
	 */
	if (pagenr < 256)
		return 1;
	/* Non-RAM pages (PCI mmio, BIOS/ACPI regions) are fair game */
	if (!page_is_ram(pagenr))
		return 1;
	/* Everything else is kernel RAM: deny */
	return 0;
}

L
Linus Torvalds 已提交
247 248 249 250
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

I
Ingo Molnar 已提交
251 252 253 254 255
static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
L
Linus Torvalds 已提交
256 257 258 259 260

/*
 * Cache the pte for the first kmap fixmap slot in kmap_pte and set the
 * default protection (PAGE_KERNEL) used for kmap mappings.
 */
static void __init kmap_init(void)
{
	/* Virtual address of the first kmap slot: */
	unsigned long first_kmap_va = __fix_to_virt(FIX_KMAP_BEGIN);

	kmap_pte = kmap_get_fixmap_pte(first_kmap_va);
	kmap_prot = PAGE_KERNEL;
}

/*
 * Set up the page tables backing the permanent kmap (pkmap) area and
 * cache its pte page in pkmap_page_table.
 */
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	unsigned long vaddr;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	vaddr = PKMAP_BASE;
	/* Allocate page tables covering the whole pkmap window */
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	/* Walk to the first pkmap pte and remember the table for kmap_high() */
	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}

Y
Yinghai Lu 已提交
288
/*
 * Hand one highmem page over to the page allocator at boot and account
 * it in totalhigh_pages.
 */
static void __init add_one_highpage_init(struct page *page, int pfn)
{
	ClearPageReserved(page);
	/* Set refcount to 1 so __free_page() releases it to the buddy */
	init_page_count(page);
	__free_page(page);
	totalhigh_pages++;
}

296 297 298 299 300
struct add_highpages_data {
	unsigned long start_pfn;
	unsigned long end_pfn;
};

301
/*
 * Worker callback for work_with_active_regions(): free every valid pfn
 * in the intersection of [start_pfn, end_pfn) and the range recorded in
 * the add_highpages_data passed via @datax. Always returns 0.
 */
static int __init add_highpages_work_fn(unsigned long start_pfn,
					 unsigned long end_pfn, void *datax)
{
	int node_pfn;
	struct page *page;
	unsigned long final_start_pfn, final_end_pfn;
	struct add_highpages_data *data;

	data = (struct add_highpages_data *)datax;

	/* Clamp the active region to the caller-requested window */
	final_start_pfn = max(start_pfn, data->start_pfn);
	final_end_pfn = min(end_pfn, data->end_pfn);
	if (final_start_pfn >= final_end_pfn)
		return 0;

	for (node_pfn = final_start_pfn; node_pfn < final_end_pfn;
	     node_pfn++) {
		/* Skip holes in the memory map */
		if (!pfn_valid(node_pfn))
			continue;
		page = pfn_to_page(node_pfn);
		add_one_highpage_init(page, node_pfn);
	}

	return 0;

}

/*
 * Free all highmem pages of node @nid that lie in [start_pfn, end_pfn)
 * and inside an active (e820) memory region, by iterating the node's
 * active regions with add_highpages_work_fn().
 */
void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
					      unsigned long end_pfn)
{
	struct add_highpages_data data;

	data.start_pfn = start_pfn;
	data.end_pfn = end_pfn;

	work_with_active_regions(nid, add_highpages_work_fn, &data);
}

I
Ingo Molnar 已提交
339
#ifndef CONFIG_NUMA
Y
Yinghai Lu 已提交
340
/*
 * Non-NUMA variant: release all highmem pages (highstart_pfn..highend_pfn)
 * to the page allocator and fold the highmem total into totalram_pages.
 */
static void __init set_highmem_pages_init(void)
{
	add_highpages_with_active_regions(0, highstart_pfn, highend_pfn);

	totalram_pages += totalhigh_pages;
}
I
Ingo Molnar 已提交
346
#endif /* !CONFIG_NUMA */
L
Linus Torvalds 已提交
347 348

#else
I
Ingo Molnar 已提交
349 350
# define kmap_init()				do { } while (0)
# define permanent_kmaps_init(pgd_base)		do { } while (0)
Y
Yinghai Lu 已提交
351
# define set_highmem_pages_init()	do { } while (0)
L
Linus Torvalds 已提交
352 353
#endif /* CONFIG_HIGHMEM */

354
pteval_t __PAGE_KERNEL = _PAGE_KERNEL;
355
EXPORT_SYMBOL(__PAGE_KERNEL);
L
Linus Torvalds 已提交
356

I
Ingo Molnar 已提交
357
pteval_t __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
L
Linus Torvalds 已提交
358

359
/*
 * Native (non-paravirt) hook run before pagetable_init(): trim the boot
 * page tables in @base of any lowmem mappings beyond max_low_pfn and
 * register the pgd page with paravirt accounting.
 */
void __init native_pagetable_setup_start(pgd_t *base)
{
	unsigned long pfn, va;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * Remove any mappings which extend past the end of physical
	 * memory from the boot time page table:
	 */
	for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
		va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
		pgd = base + pgd_index(va);
		/* Boot mappings are contiguous: first hole means we're done */
		if (!pgd_present(*pgd))
			break;

		pud = pud_offset(pgd, va);
		pmd = pmd_offset(pud, va);
		if (!pmd_present(*pmd))
			break;

		pte = pte_offset_kernel(pmd, va);
		if (!pte_present(*pte))
			break;

		pte_clear(NULL, va, pte);
	}
	paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
}

/* Native hook run after pagetable_init(); nothing to do on bare metal. */
void __init native_pagetable_setup_done(pgd_t *base)
{
}

/*
 * Build a proper pagetable for the kernel mappings.  Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
401 402
 * constructed in arch/x86/kernel/head_32.S.  The root of the
 * pagetable will be swapper_pg_dir.
403 404 405 406 407 408 409 410 411 412 413
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir.  In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
I
Ingo Molnar 已提交
414
/*
 * Build the kernel page tables in swapper_pg_dir: enable PSE/PGE if the
 * CPU has them, create the lowmem direct mapping, and set up the NUMA
 * kva, fixmap and permanent-kmap areas.
 */
static void __init pagetable_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;
	unsigned long vaddr, end;

	/* Give the hypervisor (if any) a chance to prepare the pgd */
	paravirt_pagetable_setup_start(pgd_base);

	/* Enable PSE if available */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		/* Kernel mappings become global so TLB flushes keep them */
		__PAGE_KERNEL |= _PAGE_GLOBAL;
		__PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
	}

	kernel_physical_mapping_init(pgd_base);
	remap_numa_kva();

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	early_ioremap_clear();
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, pgd_base);
	early_ioremap_reset();

	permanent_kmaps_init(pgd_base);

	paravirt_pagetable_setup_done(pgd_base);
}

450
#ifdef CONFIG_ACPI_SLEEP
L
Linus Torvalds 已提交
451
/*
452
 * ACPI suspend needs this for resume, because things like the intel-agp
L
Linus Torvalds 已提交
453 454
 * driver might have split up a kernel 4MB mapping.
 */
455
char swsusp_pg_dir[PAGE_SIZE]
I
Ingo Molnar 已提交
456
	__attribute__ ((aligned(PAGE_SIZE)));
L
Linus Torvalds 已提交
457 458 459 460 461

/* Snapshot swapper_pg_dir into swsusp_pg_dir for ACPI resume. */
static inline void save_pg_dir(void)
{
	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
462
#else /* !CONFIG_ACPI_SLEEP */
L
Linus Torvalds 已提交
463 464 465
/* No ACPI sleep support: nothing to save. */
static inline void save_pg_dir(void)
{
}
466
#endif /* !CONFIG_ACPI_SLEEP */
L
Linus Torvalds 已提交
467

I
Ingo Molnar 已提交
468
/*
 * Remove the identity mappings of low memory that were needed during
 * early boot, so that NULL-pointer dereferences fault as they should.
 */
void zap_low_mappings(void)
{
	int i;

	/*
	 * Zap initial low-memory mappings.
	 *
	 * Note that "pgd_clear()" doesn't do it for
	 * us, because pgd_clear() is a no-op on i386.
	 */
	for (i = 0; i < KERNEL_PGD_BOUNDARY; i++) {
#ifdef CONFIG_X86_PAE
		/* PAE: point at the zero page, present but empty */
		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
		set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
	}
	flush_tlb_all();
}

I
Ingo Molnar 已提交
488
int nx_enabled;
J
Jan Beulich 已提交
489

490 491 492
pteval_t __supported_pte_mask __read_mostly = ~_PAGE_NX;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

J
Jan Beulich 已提交
493 494
#ifdef CONFIG_X86_PAE

I
Ingo Molnar 已提交
495
static int disable_nx __initdata;
L
Linus Torvalds 已提交
496 497 498 499 500 501 502 503 504

/*
 * noexec = on|off
 *
 * Control non executable mappings.
 *
 * on      Enable
 * off     Disable
 */
505
/*
 * Parse the "noexec=" early parameter. "on" (or no argument) enables NX
 * in __supported_pte_mask when the CPU supports it; "off" disables it;
 * anything else is rejected with -EINVAL.
 */
static int __init noexec_setup(char *str)
{
	if (!str || !strcmp(str, "on")) {
		if (cpu_has_nx) {
			__supported_pte_mask |= _PAGE_NX;
			disable_nx = 0;
		}
	} else if (!strcmp(str, "off")) {
		disable_nx = 1;
		__supported_pte_mask &= ~_PAGE_NX;
	} else {
		return -EINVAL;
	}

	return 0;
}
523
early_param("noexec", noexec_setup);
L
Linus Torvalds 已提交
524 525 526 527 528 529 530

/*
 * Enable the NX (no-execute) feature via EFER if the CPU advertises it
 * (CPUID 0x80000001 EDX bit 20) and it wasn't disabled on the command
 * line; on success sets nx_enabled and adds _PAGE_NX to the pte mask.
 */
static void __init set_nx(void)
{
	unsigned int v[4], l, h;

	/* NX needs PAE and the extended CPUID leaf to exist */
	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);

		/* EDX bit 20 == NX capability */
		if ((v[3] & (1 << 20)) && !disable_nx) {
			rdmsr(MSR_EFER, l, h);
			l |= EFER_NX;
			wrmsr(MSR_EFER, l, h);
			nx_enabled = 1;
			__supported_pte_mask |= _PAGE_NX;
		}
	}
}
#endif

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routines also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routines also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
#ifdef CONFIG_X86_PAE
	set_nx();
	if (nx_enabled)
		printk(KERN_INFO "NX (Execute Disable) protection: active\n");
#endif
	pagetable_init();

	/* Switch to the freshly built swapper_pg_dir ... */
	load_cr3(swapper_pg_dir);

	/* ... and drop all stale TLB entries */
	__flush_tlb_all();

	kmap_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
D
Dmitri Vorobiev 已提交
568 569 570
 * and also on some strange 486's. All 586+'s are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
L
Linus Torvalds 已提交
571 572 573
 */
/*
 * Probe whether the CPU honours the WP bit in supervisor mode by
 * attempting a write through a read-only fixmap; records the result in
 * boot_cpu_data.wp_works_ok.
 */
static void __init test_wp_bit(void)
{
	printk(KERN_INFO
  "Checking if this processor honours the WP bit even in supervisor mode...");

	/* Any page-aligned address will do, the test is non-destructive */
	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
	boot_cpu_data.wp_works_ok = do_test_wp_bit();
	clear_fixmap(FIX_WP_TEST);

	if (!boot_cpu_data.wp_works_ok) {
		printk(KERN_CONT "No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
		/* Kernel was built assuming working WP: cannot continue */
		panic(
  "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
	} else {
		printk(KERN_CONT "Ok.\n");
	}
}

I
Ingo Molnar 已提交
593
static struct kcore_list kcore_mem, kcore_vmalloc;
L
Linus Torvalds 已提交
594 595 596 597

/*
 * Late boot memory setup: release bootmem to the page allocator, free
 * highmem, print the memory statistics and virtual layout, sanity-check
 * the layout constants, run the WP-bit test if still undecided, and
 * finally save the pgd and zap the low identity mappings.
 */
void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;
	int tmp;

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif
	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages:
		 */
		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
			reservedpages++;

	set_highmem_pages_init();

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Expose lowmem and vmalloc ranges through /proc/kcore */
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
			"%dk reserved, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
	       );

	/*
	 * NOTE: the format string and its argument list below are
	 * interleaved with #ifdef CONFIG_HIGHMEM in matching positions -
	 * keep them in sync when editing.
	 */
	printk(KERN_INFO "virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)__va(0), (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	/* Compile/boot-time layout invariants */
#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE	> FIXADDR_START);
	BUG_ON(VMALLOC_END				> PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_START				> VMALLOC_END);
	BUG_ON((unsigned long)high_memory		> VMALLOC_START);

	/* wp_works_ok < 0 means "not yet tested" */
	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();

	cpa_init();
	save_pg_dir();
	zap_low_mappings();
}

684
#ifdef CONFIG_MEMORY_HOTPLUG
685
/*
 * Memory hotplug entry point: hot-added memory on 32-bit x86 always
 * goes into ZONE_HIGHMEM of node @nid. Returns the result of
 * __add_pages().
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata = NODE_DATA(nid);
	struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(zone, start_pfn, nr_pages);
}
694
#endif
695

L
Linus Torvalds 已提交
696 697 698 699
/*
 * This function cannot be __init, since exceptions don't work in that
 * section.  Put this after the callers, so that it cannot be inlined.
 */
I
Ingo Molnar 已提交
700
/*
 * Try writing through the read-only FIX_WP_TEST mapping. flag starts
 * at 1; if the write faults, the exception table entry skips the xorl
 * and flag stays 1 (WP works). If the write succeeds, flag is zeroed
 * (WP broken). Returns the resulting flag.
 */
static noinline int do_test_wp_bit(void)
{
	char tmp_reg;
	int flag;

	__asm__ __volatile__(
		"	movb %0, %1	\n"
		"1:	movb %1, %0	\n"
		"	xorl %2, %2	\n"
		"2:			\n"
		_ASM_EXTABLE(1b,2b)
		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
		 "=q" (tmp_reg),
		 "=r" (flag)
		:"2" (1)
		:"memory");

	return flag;
}

720
#ifdef CONFIG_DEBUG_RODATA
721 722
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);
723 724 725

/*
 * Write-protect the kernel text and then the read-only data section.
 * With CONFIG_CPA_DEBUG, additionally exercises the change-page-attr
 * machinery by reverting and re-applying the protection.
 */
void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

	/* First the kernel text ... */
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel text: %luk\n",
		size >> 10);

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
		start, start+size);
	set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif
	/* ... then the rodata section that follows it */
	start += size;
	size = (unsigned long)__end_rodata - start;
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		size >> 10);
	rodata_test();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
}
#endif

G
Gerd Hoffmann 已提交
758 759
/*
 * Release the init pages in [begin, end) back to the page allocator,
 * poisoning their contents first. @what names the region for the log
 * message. With CONFIG_DEBUG_PAGEALLOC the pages are unmapped instead
 * of freed so stale accesses fault.
 */
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
	/*
	 * If debugging page accesses then do not free this memory but
	 * mark them not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
		begin, PAGE_ALIGN(end));
	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
	unsigned long addr;

	/*
	 * We just marked the kernel text read only above, now that
	 * we are going to free part of that, we need to make that
	 * writeable first.
	 */
	set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		/* Poison so use-after-free of init memory is detectable */
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
#endif
}

/* Free the kernel's __init section once boot is complete. */
void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}
796

L
Linus Torvalds 已提交
797 798 799
#ifdef CONFIG_BLK_DEV_INITRD
/* Free the memory occupied by the initrd once it is no longer needed. */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif
803 804 805 806 807 808

/*
 * 32-bit wrapper so shared x86 code can reserve boot memory; simply
 * forwards to reserve_bootmem() and returns its result.
 */
int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
				   int flags)
{
	return reserve_bootmem(phys, len, flags);
}