/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@ucw.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/nmi.h>
#include <linux/gfp.h>

#include <asm/processor.h>
#include <asm/bios_ebda.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/cacheflush.h>
#include <asm/init.h>
#include <asm/uv/uv.h>
#include <asm/setup.h>
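
/*
 * "gbpages"/"nogbpages" on the kernel command line control whether the
 * direct mapping may use 1GB (gbpage) PUD entries; direct_gbpages feeds
 * the page_size_mask consulted by phys_pud_init() further down.
 */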
static int __init parse_direct_gbpages_off(char *arg)
{
	direct_gbpages = 0;
	return 0;
}
early_param("nogbpages", parse_direct_gbpages_off);

static int __init parse_direct_gbpages_on(char *arg)
{
	direct_gbpages = 1;
	return 0;
}
early_param("gbpages", parse_direct_gbpages_on);

/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the location of the first one and
 * move around without checking the pgd every time.
 */

pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

int force_personality32;

/*
 * noexec32=on|off
 * Control the non-executable heap for 32-bit processes.
 * To control the stack too, use noexec=off.
 *
 * on	PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 * off	PROT_READ implies PROT_EXEC
 */
static int __init nonx32_setup(char *str)
{
	if (!strcmp(str, "on"))
		force_personality32 &= ~READ_IMPLIES_EXEC;
	else if (!strcmp(str, "off"))
		force_personality32 |= READ_IMPLIES_EXEC;
	return 1;
}
__setup("noexec32=", nonx32_setup);

/*
 * When memory is added or removed, make sure all process mm's have
 * a suitable PGD entry in their local PGD-level page.
 */
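/*
 * Within this file, kernel_physical_mapping_init() and vmemmap_populate()
 * call this after growing init_mm's page tables; a single PGD entry
 * covers 512GB of address space, so new entries appear rarely.
 */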
void sync_global_pgds(unsigned long start, unsigned long end)
{
	unsigned long address;

	for (address = start; address <= end; address += PGDIR_SIZE) {
		const pgd_t *pgd_ref = pgd_offset_k(address);
		struct page *page;

		if (pgd_none(*pgd_ref))
			continue;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			spinlock_t *pgt_lock;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
			spin_lock(pgt_lock);

			if (pgd_none(*pgd))
				set_pgd(pgd, *pgd_ref);
			else
				BUG_ON(pgd_page_vaddr(*pgd)
				       != pgd_page_vaddr(*pgd_ref));

			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}

/*
 * NOTE: This function is marked __ref because it calls __init function
 * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.
 */
static __ref void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);

	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
		panic("set_pte_phys: cannot allocate page data %s\n",
			after_bootmem ? "after bootmem" : "");
	}

	pr_debug("spp_getpage %p\n", ptr);

	return ptr;
}

static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
{
	if (pgd_none(*pgd)) {
		pud_t *pud = (pud_t *)spp_getpage();
		pgd_populate(&init_mm, pgd, pud);
		if (pud != pud_offset(pgd, 0))
			printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
			       pud, pud_offset(pgd, 0));
	}
	return pud_offset(pgd, vaddr);
}

static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
{
	if (pud_none(*pud)) {
		pmd_t *pmd = (pmd_t *) spp_getpage();
		pud_populate(&init_mm, pud, pmd);
		if (pmd != pmd_offset(pud, 0))
			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
			       pmd, pmd_offset(pud, 0));
	}
	return pmd_offset(pud, vaddr);
}

static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = (pte_t *) spp_getpage();
		pmd_populate_kernel(&init_mm, pmd, pte);
		if (pte != pte_offset_kernel(pmd, 0))
			printk(KERN_ERR "PAGETABLE BUG #02!\n");
	}
	return pte_offset_kernel(pmd, vaddr);
}

void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pud = pud_page + pud_index(vaddr);
	pmd = fill_pmd(pud, vaddr);
	pte = fill_pte(pmd, vaddr);

	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
	pgd_t *pgd;
	pud_t *pud_page;

	pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_ERR
			"PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud_page = (pud_t*)pgd_page_vaddr(*pgd);
	set_pte_vaddr_pud(pud_page, vaddr, pteval);
}
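
/*
 * Illustrative use (from pgtable.c, not this file): native_set_fixmap()
 * installs one fixmap entry via
 *
 *	set_pte_vaddr(fix_to_virt(idx), pte);
 *
 * relying on fill_pmd()/fill_pte() above to allocate any missing
 * intermediate page tables.
 */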

pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
	pgd_t *pgd;
	pud_t *pud;

	pgd = pgd_offset_k(vaddr);
	pud = fill_pud(pgd, vaddr);
	return fill_pmd(pud, vaddr);
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
	pmd_t *pmd;

	pmd = populate_extra_pmd(vaddr);
	return fill_pte(pmd, vaddr);
}

/*
 * Create large page table mappings for a range of physical addresses.
 */
static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
						pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
	for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
		pgd = pgd_offset_k((unsigned long)__va(phys));
		if (pgd_none(*pgd)) {
			pud = (pud_t *) spp_getpage();
			set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		pud = pud_offset(pgd, (unsigned long)__va(phys));
		if (pud_none(*pud)) {
			pmd = (pmd_t *) spp_getpage();
			set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		pmd = pmd_offset(pud, phys);
		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
	}
}

void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, PAGE_KERNEL_LARGE);
}

void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, PAGE_KERNEL_LARGE_NOCACHE);
}
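
/*
 * Used by platform setup code; the SGI UV support in x2apic_uv_x.c, for
 * example, maps its MMR space through these 2MB-page helpers.
 */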

/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
 *
 * phys_addr holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _brk_end.  _brk_end
 * is rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
	unsigned long vaddr = __START_KERNEL_map;
	unsigned long vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);
	unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
	pmd_t *pmd = level2_kernel_pgt;

	for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
		if (pmd_none(*pmd))
			continue;
		if (vaddr < (unsigned long) _text || vaddr > end)
			set_pmd(pmd, __pmd(0));
	}
}

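/*
 * Before bootmem is up, pages for new page tables come from the
 * pgt_buf_start..pgt_buf_top window reserved earlier during boot;
 * early_memremap() is needed because those pages are not yet part of
 * the direct mapping.
 */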
static __ref void *alloc_low_page(unsigned long *phys)
{
	unsigned long pfn = pgt_buf_end++;
	void *adr;

	if (after_bootmem) {
		adr = (void *)get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
		*phys = __pa(adr);

		return adr;
	}

	if (pfn >= pgt_buf_top)
		panic("alloc_low_page: ran out of memory");

	adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
	clear_page(adr);
	*phys  = pfn * PAGE_SIZE;
	return adr;
}

static __ref void *map_low_page(void *virt)
{
	void *adr;
	unsigned long phys, left;

	if (after_bootmem)
		return virt;

	phys = __pa(virt);
	left = phys & (PAGE_SIZE - 1);
	adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
	adr = (void *)(((unsigned long)adr) | left);

	return adr;
}

static __ref void unmap_low_page(void *adr)
{
	if (after_bootmem)
		return;

	early_iounmap((void *)((unsigned long)adr & PAGE_MASK), PAGE_SIZE);
}
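
/*
 * The phys_{pte,pmd,pud}_init() helpers below take *physical* addresses
 * (kernel_physical_mapping_init() hands them __pa() values) and return
 * the end of the range they actually mapped.
 */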

static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
	      pgprot_t prot)
{
	unsigned pages = 0;
	unsigned long last_map_addr = end;
	int i;

	pte_t *pte = pte_page + pte_index(addr);

	for(i = pte_index(addr); i < PTRS_PER_PTE; i++, addr += PAGE_SIZE, pte++) {

		if (addr >= end) {
			if (!after_bootmem) {
				for(; i < PTRS_PER_PTE; i++, pte++)
					set_pte(pte, __pte(0));
			}
			break;
		}

		/*
		 * We will re-use the existing mapping. Xen for example has
		 * some special requirements, like mapping pagetable pages
		 * as RO, so assume that whoever pre-set up these mappings
		 * knew what they were doing.
		 */
		if (pte_val(*pte)) {
			pages++;
			continue;
		}

		if (0)
			printk("   pte=%p addr=%lx pte=%016lx\n",
			       pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
		pages++;
		set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, prot));
		last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
	}

	update_page_count(PG_LEVEL_4K, pages);

	return last_map_addr;
}

static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
	      unsigned long page_size_mask, pgprot_t prot)
{
	unsigned long pages = 0, next;
	unsigned long last_map_addr = end;

	int i = pmd_index(address);

	for (; i < PTRS_PER_PMD; i++, address = next) {
		unsigned long pte_phys;
		pmd_t *pmd = pmd_page + pmd_index(address);
		pte_t *pte;
		pgprot_t new_prot = prot;

		if (address >= end) {
			if (!after_bootmem) {
				for (; i < PTRS_PER_PMD; i++, pmd++)
					set_pmd(pmd, __pmd(0));
			}
			break;
		}

		next = (address & PMD_MASK) + PMD_SIZE;

		if (pmd_val(*pmd)) {
			if (!pmd_large(*pmd)) {
				spin_lock(&init_mm.page_table_lock);
				pte = map_low_page((pte_t *)pmd_page_vaddr(*pmd));
				last_map_addr = phys_pte_init(pte, address,
								end, prot);
				unmap_low_page(pte);
				spin_unlock(&init_mm.page_table_lock);
				continue;
			}
			/*
			 * If we are ok with PG_LEVEL_2M mapping, then we will
			 * use the existing mapping.
			 *
			 * Otherwise, we will split the large page mapping but
			 * use the same existing protection bits except for
			 * large page, so that we don't violate Intel's TLB
			 * Application note (317080) which says, while changing
			 * the page sizes, new and old translations should
			 * not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_2M)) {
				last_map_addr = next;
				continue;
			}
			new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
		}

		if (page_size_mask & (1<<PG_LEVEL_2M)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pte((pte_t *)pmd,
				pfn_pte(address >> PAGE_SHIFT,
					__pgprot(pgprot_val(prot) | _PAGE_PSE)));
			spin_unlock(&init_mm.page_table_lock);
			last_map_addr = next;
			continue;
		}

		pte = alloc_low_page(&pte_phys);
		last_map_addr = phys_pte_init(pte, address, end, new_prot);
		unmap_low_page(pte);

		spin_lock(&init_mm.page_table_lock);
		pmd_populate_kernel(&init_mm, pmd, __va(pte_phys));
		spin_unlock(&init_mm.page_table_lock);
	}
	update_page_count(PG_LEVEL_2M, pages);
	return last_map_addr;
}

static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
			 unsigned long page_size_mask)
{
	unsigned long pages = 0, next;
	unsigned long last_map_addr = end;
	int i = pud_index(addr);

	for (; i < PTRS_PER_PUD; i++, addr = next) {
		unsigned long pmd_phys;
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;
		pgprot_t prot = PAGE_KERNEL;

		if (addr >= end)
			break;

		next = (addr & PUD_MASK) + PUD_SIZE;

		if (!after_bootmem && !e820_any_mapped(addr, next, 0)) {
			set_pud(pud, __pud(0));
			continue;
		}

		if (pud_val(*pud)) {
			if (!pud_large(*pud)) {
				pmd = map_low_page(pmd_offset(pud, 0));
				last_map_addr = phys_pmd_init(pmd, addr, end,
							 page_size_mask, prot);
				unmap_low_page(pmd);
				__flush_tlb_all();
				continue;
			}
			/*
			 * If we are ok with PG_LEVEL_1G mapping, then we will
			 * use the existing mapping.
			 *
			 * Otherwise, we will split the gbpage mapping but use
			 * the same existing protection bits except for large
			 * page, so that we don't violate Intel's TLB
			 * Application note (317080) which says, while changing
			 * the page sizes, new and old translations should
			 * not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_1G)) {
				last_map_addr = next;
				continue;
			}
			prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
		}

		if (page_size_mask & (1<<PG_LEVEL_1G)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pte((pte_t *)pud,
				pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
			spin_unlock(&init_mm.page_table_lock);
			last_map_addr = next;
			continue;
		}

		pmd = alloc_low_page(&pmd_phys);
		last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask,
					      prot);
		unmap_low_page(pmd);

		spin_lock(&init_mm.page_table_lock);
		pud_populate(&init_mm, pud, __va(pmd_phys));
		spin_unlock(&init_mm.page_table_lock);
	}
	__flush_tlb_all();

	update_page_count(PG_LEVEL_1G, pages);

	return last_map_addr;
}
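
/*
 * Build the kernel direct mapping for the physical range start..end,
 * using the largest page size that page_size_mask permits at each
 * level. Returns the last physical address mapped.
 */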

unsigned long __meminit
kernel_physical_mapping_init(unsigned long start,
			     unsigned long end,
			     unsigned long page_size_mask)
{
	bool pgd_changed = false;
	unsigned long next, last_map_addr = end;
	unsigned long addr;

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);
	addr = start;

	for (; start < end; start = next) {
		pgd_t *pgd = pgd_offset_k(start);
		unsigned long pud_phys;
		pud_t *pud;

		next = (start + PGDIR_SIZE) & PGDIR_MASK;
		if (next > end)
			next = end;

		if (pgd_val(*pgd)) {
			pud = map_low_page((pud_t *)pgd_page_vaddr(*pgd));
			last_map_addr = phys_pud_init(pud, __pa(start),
						 __pa(end), page_size_mask);
			unmap_low_page(pud);
			continue;
		}

		pud = alloc_low_page(&pud_phys);
		last_map_addr = phys_pud_init(pud, __pa(start), __pa(next),
						 page_size_mask);
		unmap_low_page(pud);

		spin_lock(&init_mm.page_table_lock);
		pgd_populate(&init_mm, pgd, __va(pud_phys));
		spin_unlock(&init_mm.page_table_lock);
		pgd_changed = true;
	}

	if (pgd_changed)
		sync_global_pgds(addr, end);

	__flush_tlb_all();

	return last_map_addr;
}

#ifndef CONFIG_NUMA
void __init initmem_init(void)
{
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
}
#endif

void __init paging_init(void)
{
	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

	/*
	 * clear the default setting with node 0
	 * note: don't use nodes_clear here, that is really clearing when
	 *	 numa support is not compiled in, and later node_set_state
	 *	 will not set it back.
	 */
	node_clear_state(0, N_NORMAL_MEMORY);

	zone_sizes_init();
}

/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
 * updating.
 */
static void  update_end_of_memory_vars(u64 start, u64 size)
{
	unsigned long end_pfn = PFN_UP(start + size);

	if (end_pfn > max_pfn) {
		max_pfn = end_pfn;
		max_low_pfn = end_pfn;
		high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
	}
}
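
/*
 * arch_add_memory() below is reached from the generic hotplug path
 * (add_memory() in mm/memory_hotplug.c) once a new block is discovered.
 */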

/*
 * Memory is always added to the NORMAL zone. This means you will never
 * get additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
	unsigned long last_mapped_pfn, start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	last_mapped_pfn = init_memory_mapping(start, start + size);
	if (last_mapped_pfn > max_pfn_mapped)
		max_pfn_mapped = last_mapped_pfn;

	ret = __add_pages(nid, zone, start_pfn, nr_pages);
	WARN_ON_ONCE(ret);

	/* update max_pfn, max_low_pfn and high_memory */
	update_end_of_memory_vars(start, size);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#endif /* CONFIG_MEMORY_HOTPLUG */

static struct kcore_list kcore_vsyscall;

void __init mem_init(void)
{
	long codesize, reservedpages, datasize, initsize;
	unsigned long absent_pages;

	pci_iommu_alloc();

	/* clear_bss() already cleared the empty_zero_page */

	reservedpages = 0;

	/* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
	totalram_pages = numa_free_all_bootmem();
#else
	totalram_pages = free_all_bootmem();
#endif

	absent_pages = absent_pages_in_range(0, max_pfn);
	reservedpages = max_pfn - totalram_pages - absent_pages;
	after_bootmem = 1;

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
			 VSYSCALL_END - VSYSCALL_START, KCORE_OTHER);

	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
			 "%ldk absent, %ldk reserved, %ldk data, %ldk init)\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		max_pfn << (PAGE_SHIFT-10),
		codesize >> 10,
		absent_pages << (PAGE_SHIFT-10),
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

int kernel_set_to_readonly;

void set_kernel_text_rw(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = PFN_ALIGN(__stop___ex_table);

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read write\n",
		 start, end);

	/*
	 * Make the kernel identity mapping for text RW. Kernel text
	 * mapping will always be RO. Refer to the comment in
	 * static_protections() in pageattr.c
	 */
	set_memory_rw(start, (end - start) >> PAGE_SHIFT);
}

void set_kernel_text_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = PFN_ALIGN(__stop___ex_table);

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read only\n",
		 start, end);

	/*
	 * Set the kernel identity mapping for text RO.
	 */
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long rodata_start =
		((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;
	unsigned long end = (unsigned long) &__end_rodata_hpage_align;
	unsigned long text_end = PAGE_ALIGN((unsigned long) &__stop___ex_table);
	unsigned long rodata_end = PAGE_ALIGN((unsigned long) &__end_rodata);
	unsigned long data_start = (unsigned long) &_sdata;

	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
	       (end - start) >> 10);
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);

	kernel_set_to_readonly = 1;

	/*
	 * The rodata section (but not the kernel text!) should also be
	 * not-executable.
	 */
	set_memory_nx(rodata_start, (end - rodata_start) >> PAGE_SHIFT);

	rodata_test();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
	set_memory_rw(start, (end-start) >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: again\n");
	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif

	free_init_pages("unused kernel memory",
			(unsigned long) page_address(virt_to_page(text_end)),
			(unsigned long)
				 page_address(virt_to_page(rodata_start)));
	free_init_pages("unused kernel memory",
			(unsigned long) page_address(virt_to_page(rodata_end)),
			(unsigned long) page_address(virt_to_page(data_start)));
}

#endif
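
/*
 * kern_addr_valid() reports whether a kernel virtual address is backed
 * by a present page table entry; the 'above' test rejects non-canonical
 * addresses, whose bits above __VIRTUAL_MASK_SHIFT must be all zeros or
 * all ones.
 */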

int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}

/*
 * A pseudo VMA to allow ptrace access for the vsyscall page.  This only
 * covers the 64bit vsyscall page now. 32bit has a real VMA now and does
 * not need special handling anymore:
 */
static struct vm_area_struct gate_vma = {
	.vm_start	= VSYSCALL_START,
	.vm_end		= VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
	.vm_page_prot	= PAGE_READONLY_EXEC,
	.vm_flags	= VM_READ | VM_EXEC
};

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
#ifdef CONFIG_IA32_EMULATION
	if (!mm || mm->context.ia32_compat)
		return NULL;
#endif
	return &gate_vma;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(mm);

	if (!vma)
		return 0;

	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/*
 * Use this when you have no reliable mm, typically from interrupt
 * context. It is less reliable than using a task's mm and may give
 * false positives.
 */
int in_gate_area_no_mm(unsigned long addr)
{
	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
		return "[vdso]";
	if (vma == &gate_vma)
		return "[vsyscall]";
	return NULL;
}

#ifdef CONFIG_X86_UV
unsigned long memory_block_size_bytes(void)
{
	if (is_uv_system()) {
		printk(KERN_INFO "UV: memory block size 2GB\n");
		return 2UL * 1024 * 1024 * 1024;
	}
	return MIN_MEMORY_BLOCK_SIZE;
}
#endif
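
/*
 * With SPARSEMEM_VMEMMAP the struct page array lives in a virtually
 * contiguous area; vmemmap_populate() below backs it with 2MB PMD
 * mappings when the CPU has PSE and falls back to 4k PTEs otherwise.
 */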

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;

int __meminit
vmemmap_populate(struct page *start_page, unsigned long size, int node)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + size);
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	for (; addr < end; addr = next) {
		void *p = NULL;

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		if (!cpu_has_pse) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			pmd = vmemmap_pmd_populate(pud, addr, node);

			if (!pmd)
				return -ENOMEM;

			p = vmemmap_pte_populate(pmd, addr, node);

			if (!p)
				return -ENOMEM;

			addr_end = addr + PAGE_SIZE;
			p_end = p + PAGE_SIZE;
		} else {
			next = pmd_addr_end(addr, end);

			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd)) {
				pte_t entry;

				p = vmemmap_alloc_block_buf(PMD_SIZE, node);
				if (!p)
					return -ENOMEM;

				entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
						PAGE_KERNEL_LARGE);
				set_pmd(pmd, __pmd(pte_val(entry)));

				/* check to see if we have contiguous blocks */
				if (p_end != p || node_start != node) {
					if (p_start)
						printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
						       addr_start, addr_end-1, p_start, p_end-1, node_start);
					addr_start = addr;
					node_start = node;
					p_start = p;
				}

				addr_end = addr + PMD_SIZE;
				p_end = p + PMD_SIZE;
			} else
				vmemmap_verify((pte_t *)pmd, node, addr, next);
		}

	}
	sync_global_pgds((unsigned long)start_page, end);
	return 0;
}

void __meminit vmemmap_populate_print_last(void)
{
	if (p_start) {
		printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
			addr_start, addr_end-1, p_start, p_end-1, node_start);
		p_start = NULL;
		p_end = NULL;
		node_start = 0;
	}
}
#endif