/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@ucw.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/memremap.h>
#include <linux/nmi.h>
#include <linux/gfp.h>
#include <linux/kcore.h>

#include <asm/processor.h>
#include <asm/bios_ebda.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820/api.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/set_memory.h>
#include <asm/init.h>
#include <asm/uv/uv.h>
#include <asm/setup.h>

#include "mm_internal.h"

#include "ident_map.c"

/*
 * NOTE: pagetable_init() allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the location of the first one and
 * move around without checking the pgd every time.
 */

pteval_t __supported_pte_mask __read_mostly = ~0;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

int force_personality32;

/*
 * noexec32=on|off
 * Control the non-executable heap for 32-bit processes.
 * To control the stack too, use noexec=off.
 *
 * on	PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 * off	PROT_READ implies PROT_EXEC
 */
static int __init nonx32_setup(char *str)
{
	if (!strcmp(str, "on"))
		force_personality32 &= ~READ_IMPLIES_EXEC;
	else if (!strcmp(str, "off"))
		force_personality32 |= READ_IMPLIES_EXEC;
	return 1;
}
__setup("noexec32=", nonx32_setup);

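/*
 * The two helpers below walk the kernel address range and copy any top-level
 * kernel page-table entries that are set in init_mm but still missing from
 * the per-process page tables on pgd_list. Which level is the top depends on
 * whether 5-level paging is enabled.
 */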
static void sync_global_pgds_l5(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
		const pgd_t *pgd_ref = pgd_offset_k(addr);
		struct page *page;

		/* Check for overflow */
		if (addr < start)
			break;

		if (pgd_none(*pgd_ref))
			continue;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			spinlock_t *pgt_lock;

			pgd = (pgd_t *)page_address(page) + pgd_index(addr);
			/* the pgt_lock is only used by Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
			spin_lock(pgt_lock);

			if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));

			if (pgd_none(*pgd))
				set_pgd(pgd, *pgd_ref);

			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}

static void sync_global_pgds_l4(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
		pgd_t *pgd_ref = pgd_offset_k(addr);
		const p4d_t *p4d_ref;
		struct page *page;

		/*
		 * With a folded p4d, pgd_none() is always false, so we need
		 * to handle the synchronization at the p4d level.
		 */
		MAYBE_BUILD_BUG_ON(pgd_none(*pgd_ref));
		p4d_ref = p4d_offset(pgd_ref, addr);

		if (p4d_none(*p4d_ref))
			continue;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			p4d_t *p4d;
			spinlock_t *pgt_lock;

			pgd = (pgd_t *)page_address(page) + pgd_index(addr);
			p4d = p4d_offset(pgd, addr);
			/* the pgt_lock is only used by Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
			spin_lock(pgt_lock);

			if (!p4d_none(*p4d_ref) && !p4d_none(*p4d))
				BUG_ON(p4d_page_vaddr(*p4d)
				       != p4d_page_vaddr(*p4d_ref));

			if (p4d_none(*p4d))
				set_p4d(p4d, *p4d_ref);

			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}

/*
 * When memory is added, make sure all the process MMs have
 * suitable PGD entries in the local PGD-level page.
 */
void sync_global_pgds(unsigned long start, unsigned long end)
{
	if (pgtable_l5_enabled)
		sync_global_pgds_l5(start, end);
	else
		sync_global_pgds_l4(start, end);
}

/*
 * NOTE: This function is marked __ref because it calls the __init function
 * alloc_bootmem_pages(). Doing that is safe ONLY while after_bootmem == 0.
 */
static __ref void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);

	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
		panic("set_pte_phys: cannot allocate page data %s\n",
			after_bootmem ? "after bootmem" : "");
	}

	pr_debug("spp_getpage %p\n", ptr);

	return ptr;
}

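/*
 * The fill_*() helpers below each walk one level of the kernel page tables
 * for vaddr, allocating the next-level table via spp_getpage() if the entry
 * is empty, and return a pointer to the next-level entry for vaddr.
 */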
static p4d_t *fill_p4d(pgd_t *pgd, unsigned long vaddr)
{
	if (pgd_none(*pgd)) {
		p4d_t *p4d = (p4d_t *)spp_getpage();
		pgd_populate(&init_mm, pgd, p4d);
		if (p4d != p4d_offset(pgd, 0))
			printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
			       p4d, p4d_offset(pgd, 0));
	}
	return p4d_offset(pgd, vaddr);
}

static pud_t *fill_pud(p4d_t *p4d, unsigned long vaddr)
{
	if (p4d_none(*p4d)) {
		pud_t *pud = (pud_t *)spp_getpage();
		p4d_populate(&init_mm, p4d, pud);
		if (pud != pud_offset(p4d, 0))
			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
			       pud, pud_offset(p4d, 0));
	}
	return pud_offset(p4d, vaddr);
}

static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
{
	if (pud_none(*pud)) {
		pmd_t *pmd = (pmd_t *) spp_getpage();
		pud_populate(&init_mm, pud, pmd);
		if (pmd != pmd_offset(pud, 0))
			printk(KERN_ERR "PAGETABLE BUG #02! %p <-> %p\n",
			       pmd, pmd_offset(pud, 0));
	}
	return pmd_offset(pud, vaddr);
}

static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = (pte_t *) spp_getpage();
		pmd_populate_kernel(&init_mm, pmd, pte);
		if (pte != pte_offset_kernel(pmd, 0))
			printk(KERN_ERR "PAGETABLE BUG #03!\n");
	}
	return pte_offset_kernel(pmd, vaddr);
}

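/*
 * Install a single kernel PTE at vaddr, allocating any missing intermediate
 * page-table levels on the way down, then flush the one affected TLB entry.
 */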
static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte)
{
	pmd_t *pmd = fill_pmd(pud, vaddr);
	pte_t *pte = fill_pte(pmd, vaddr);

	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte)
{
	p4d_t *p4d = p4d_page + p4d_index(vaddr);
	pud_t *pud = fill_pud(p4d, vaddr);

	__set_pte_vaddr(pud, vaddr, new_pte);
}

void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
{
	pud_t *pud = pud_page + pud_index(vaddr);

	__set_pte_vaddr(pud, vaddr, new_pte);
}

void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
	pgd_t *pgd;
	p4d_t *p4d_page;

	pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_ERR
			"PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}

	p4d_page = p4d_offset(pgd, 0);
	set_pte_vaddr_p4d(p4d_page, vaddr, pteval);
}

pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset_k(vaddr);
	p4d = fill_p4d(pgd, vaddr);
	pud = fill_pud(p4d, vaddr);
	return fill_pmd(pud, vaddr);
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
	pmd_t *pmd;

	pmd = populate_extra_pmd(vaddr);
	return fill_pte(pmd, vaddr);
}

/*
 * Create large page table mappings for a range of physical addresses.
 */
static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
					enum page_cache_mode cache)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pgprot_t prot;

	pgprot_val(prot) = pgprot_val(PAGE_KERNEL_LARGE) |
		pgprot_val(pgprot_4k_2_large(cachemode2pgprot(cache)));
	BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
	for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
		pgd = pgd_offset_k((unsigned long)__va(phys));
		if (pgd_none(*pgd)) {
			p4d = (p4d_t *) spp_getpage();
			set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		p4d = p4d_offset(pgd, (unsigned long)__va(phys));
		if (p4d_none(*p4d)) {
			pud = (pud_t *) spp_getpage();
			set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		pud = pud_offset(p4d, (unsigned long)__va(phys));
		if (pud_none(*pud)) {
			pmd = (pmd_t *) spp_getpage();
			set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		pmd = pmd_offset(pud, phys);
		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
	}
}

void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_WB);
}

void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_UC);
}

/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
 *
 * phys_base holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _brk_end.  _brk_end
 * is rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
	unsigned long vaddr = __START_KERNEL_map;
	unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE;
	unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
	pmd_t *pmd = level2_kernel_pgt;

	/*
	 * Native path, max_pfn_mapped is not set yet.
	 * Xen has valid max_pfn_mapped set in
	 *	arch/x86/xen/mmu.c:xen_setup_kernel_pagetable().
	 */
	if (max_pfn_mapped)
		vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);

	for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
		if (pmd_none(*pmd))
			continue;
		if (vaddr < (unsigned long) _text || vaddr > end)
			set_pmd(pmd, __pmd(0));
	}
}

/*
 * Create PTE-level page table mappings for a range of physical addresses.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
	      pgprot_t prot)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;
	pte_t *pte;
	int i;

	pte = pte_page + pte_index(paddr);
	i = pte_index(paddr);

	for (; i < PTRS_PER_PTE; i++, paddr = paddr_next, pte++) {
		paddr_next = (paddr & PAGE_MASK) + PAGE_SIZE;
		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
					     E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
					     E820_TYPE_RESERVED_KERN))
				set_pte(pte, __pte(0));
			continue;
		}

		/*
		 * We will re-use the existing mapping.
		 * Xen for example has some special requirements, like mapping
		 * pagetable pages as RO. So assume that whoever pre-set up
		 * these mappings knew what they were doing.
		 */
		if (!pte_none(*pte)) {
			if (!after_bootmem)
				pages++;
			continue;
		}

		if (0)
			pr_info("   pte=%p addr=%lx pte=%016lx\n", pte, paddr,
				pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL).pte);
		pages++;
		set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, prot));
		paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE;
	}

	update_page_count(PG_LEVEL_4K, pages);

	return paddr_last;
}

/*
 * Create PMD-level page table mappings for a range of physical addresses.
 * The virtual and physical addresses have to be aligned at this level.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask, pgprot_t prot)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;

	int i = pmd_index(paddr);

	for (; i < PTRS_PER_PMD; i++, paddr = paddr_next) {
		pmd_t *pmd = pmd_page + pmd_index(paddr);
		pte_t *pte;
		pgprot_t new_prot = prot;

		paddr_next = (paddr & PMD_MASK) + PMD_SIZE;
		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PMD_MASK, paddr_next,
					     E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & PMD_MASK, paddr_next,
					     E820_TYPE_RESERVED_KERN))
				set_pmd(pmd, __pmd(0));
			continue;
		}

		if (!pmd_none(*pmd)) {
			if (!pmd_large(*pmd)) {
				spin_lock(&init_mm.page_table_lock);
				pte = (pte_t *)pmd_page_vaddr(*pmd);
				paddr_last = phys_pte_init(pte, paddr,
							   paddr_end, prot);
				spin_unlock(&init_mm.page_table_lock);
				continue;
			}
			/*
			 * If we are ok with PG_LEVEL_2M mapping, then we will
			 * use the existing mapping.
			 *
			 * Otherwise, we will split the large page mapping but
			 * use the same existing protection bits except for
			 * large page, so that we don't violate Intel's TLB
			 * Application note (317080) which says, while changing
			 * the page sizes, new and old translations should
			 * not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_2M)) {
				if (!after_bootmem)
					pages++;
				paddr_last = paddr_next;
				continue;
			}
			new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
		}

		if (page_size_mask & (1<<PG_LEVEL_2M)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pte((pte_t *)pmd,
				pfn_pte((paddr & PMD_MASK) >> PAGE_SHIFT,
					__pgprot(pgprot_val(prot) | _PAGE_PSE)));
			spin_unlock(&init_mm.page_table_lock);
			paddr_last = paddr_next;
			continue;
		}

		pte = alloc_low_page();
		paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot);

		spin_lock(&init_mm.page_table_lock);
		pmd_populate_kernel(&init_mm, pmd, pte);
		spin_unlock(&init_mm.page_table_lock);
	}
	update_page_count(PG_LEVEL_2M, pages);
	return paddr_last;
}

/*
 * Create PUD-level page table mappings for a range of physical addresses.
 * The virtual and physical addresses do not have to be aligned at this
 * level. KASLR can randomize virtual addresses up to this level.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;
	unsigned long vaddr = (unsigned long)__va(paddr);
	int i = pud_index(vaddr);

	for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) {
		pud_t *pud;
		pmd_t *pmd;
		pgprot_t prot = PAGE_KERNEL;

		vaddr = (unsigned long)__va(paddr);
		pud = pud_page + pud_index(vaddr);
		paddr_next = (paddr & PUD_MASK) + PUD_SIZE;

		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PUD_MASK, paddr_next,
					     E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & PUD_MASK, paddr_next,
					     E820_TYPE_RESERVED_KERN))
				set_pud(pud, __pud(0));
			continue;
		}

		if (!pud_none(*pud)) {
			if (!pud_large(*pud)) {
				pmd = pmd_offset(pud, 0);
				paddr_last = phys_pmd_init(pmd, paddr,
							   paddr_end,
							   page_size_mask,
							   prot);
				__flush_tlb_all();
				continue;
			}
			/*
			 * If we are ok with PG_LEVEL_1G mapping, then we will
			 * use the existing mapping.
			 *
			 * Otherwise, we will split the gbpage mapping but use
			 * the same existing protection bits except for large
			 * page, so that we don't violate Intel's TLB
			 * Application note (317080) which says, while changing
			 * the page sizes, new and old translations should
			 * not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_1G)) {
				if (!after_bootmem)
					pages++;
				paddr_last = paddr_next;
				continue;
			}
			prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
		}

		if (page_size_mask & (1<<PG_LEVEL_1G)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pte((pte_t *)pud,
				pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
					PAGE_KERNEL_LARGE));
			spin_unlock(&init_mm.page_table_lock);
			paddr_last = paddr_next;
			continue;
		}

		pmd = alloc_low_page();
		paddr_last = phys_pmd_init(pmd, paddr, paddr_end,
					   page_size_mask, prot);

		spin_lock(&init_mm.page_table_lock);
		pud_populate(&init_mm, pud, pmd);
		spin_unlock(&init_mm.page_table_lock);
	}
	__flush_tlb_all();

	update_page_count(PG_LEVEL_1G, pages);

	return paddr_last;
}

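/*
 * Create P4D-level page table mappings for a range of physical addresses.
 * With 4-level paging the p4d is folded and this falls straight through to
 * phys_pud_init(). It returns the last physical address mapped.
 */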
static unsigned long __meminit
phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask)
{
	unsigned long paddr_next, paddr_last = paddr_end;
	unsigned long vaddr = (unsigned long)__va(paddr);
	int i = p4d_index(vaddr);

	if (!pgtable_l5_enabled)
		return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end, page_size_mask);

	for (; i < PTRS_PER_P4D; i++, paddr = paddr_next) {
		p4d_t *p4d;
		pud_t *pud;

		vaddr = (unsigned long)__va(paddr);
		p4d = p4d_page + p4d_index(vaddr);
		paddr_next = (paddr & P4D_MASK) + P4D_SIZE;

		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & P4D_MASK, paddr_next,
					     E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & P4D_MASK, paddr_next,
					     E820_TYPE_RESERVED_KERN))
				set_p4d(p4d, __p4d(0));
			continue;
		}

		if (!p4d_none(*p4d)) {
			pud = pud_offset(p4d, 0);
			paddr_last = phys_pud_init(pud, paddr,
					paddr_end,
					page_size_mask);
			__flush_tlb_all();
			continue;
		}

		pud = alloc_low_page();
		paddr_last = phys_pud_init(pud, paddr, paddr_end,
					   page_size_mask);

		spin_lock(&init_mm.page_table_lock);
		p4d_populate(&init_mm, p4d, pud);
		spin_unlock(&init_mm.page_table_lock);
	}
	__flush_tlb_all();

	return paddr_last;
}

/*
 * Create page table mappings for the physical memory at specific physical
 * addresses. The virtual and physical addresses have to be aligned on PMD
 * level down. It returns the last physical address mapped.
 */
unsigned long __meminit
kernel_physical_mapping_init(unsigned long paddr_start,
			     unsigned long paddr_end,
			     unsigned long page_size_mask)
{
	bool pgd_changed = false;
	unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;

	paddr_last = paddr_end;
	vaddr = (unsigned long)__va(paddr_start);
	vaddr_end = (unsigned long)__va(paddr_end);
	vaddr_start = vaddr;

	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
		pgd_t *pgd = pgd_offset_k(vaddr);
		p4d_t *p4d;

		vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE;

		if (pgd_val(*pgd)) {
			p4d = (p4d_t *)pgd_page_vaddr(*pgd);
			paddr_last = phys_p4d_init(p4d, __pa(vaddr),
						   __pa(vaddr_end),
						   page_size_mask);
			continue;
		}

		p4d = alloc_low_page();
		paddr_last = phys_p4d_init(p4d, __pa(vaddr), __pa(vaddr_end),
					   page_size_mask);

		spin_lock(&init_mm.page_table_lock);
		if (pgtable_l5_enabled)
			pgd_populate(&init_mm, pgd, p4d);
		else
			p4d_populate(&init_mm, p4d_offset(pgd, vaddr), (pud_t *) p4d);
		spin_unlock(&init_mm.page_table_lock);
		pgd_changed = true;
	}

	if (pgd_changed)
		sync_global_pgds(vaddr_start, vaddr_end - 1);

	__flush_tlb_all();

	return paddr_last;
}

#ifndef CONFIG_NUMA
void __init initmem_init(void)
{
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
}
#endif

void __init paging_init(void)
{
	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

	/*
	 * Clear the default setting of node 0.
	 * Note: don't use nodes_clear here; that really does clear when
	 *	 NUMA support is not compiled in, and a later node_set_state
	 *	 will not set it back.
	 */
	node_clear_state(0, N_MEMORY);
	if (N_MEMORY != N_NORMAL_MEMORY)
		node_clear_state(0, N_NORMAL_MEMORY);

	zone_sizes_init();
}

/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
 * updating.
 */
static void update_end_of_memory_vars(u64 start, u64 size)
{
	unsigned long end_pfn = PFN_UP(start + size);

	if (end_pfn > max_pfn) {
		max_pfn = end_pfn;
		max_low_pfn = end_pfn;
		high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
	}
}

int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap, bool want_memblock)
{
	int ret;

	ret = __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
	WARN_ON_ONCE(ret);

	/* update max_pfn, max_low_pfn and high_memory */
	update_end_of_memory_vars(start_pfn << PAGE_SHIFT,
				  nr_pages << PAGE_SHIFT);

	return ret;
}

int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
		bool want_memblock)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	init_memory_mapping(start, start + size);

	return add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
}

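/*
 * Poison value written over unused page-struct ranges in a partially freed
 * vmemmap page; the backing page is only freed once the whole page reads
 * back as this value.
 */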
#define PAGE_INUSE 0xFD

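/*
 * Return a no-longer-needed page-table page to the allocator it came from:
 * the altmap reserve, the bootmem allocator, or the page allocator.
 */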
static void __meminit free_pagetable(struct page *page, int order,
		struct vmem_altmap *altmap)
{
	unsigned long magic;
	unsigned int nr_pages = 1 << order;

	if (altmap) {
		vmem_altmap_free(altmap, nr_pages);
		return;
	}

	/* bootmem pages have the reserved flag set */
	if (PageReserved(page)) {
		__ClearPageReserved(page);

		magic = (unsigned long)page->freelist;
		if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
			while (nr_pages--)
				put_page_bootmem(page++);
		} else
			while (nr_pages--)
				free_reserved_page(page++);
	} else
		free_pages((unsigned long)page_address(page), order);
}

static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd,
		struct vmem_altmap *altmap)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	/* free a pte table */
	free_pagetable(pmd_page(*pmd), 0, altmap);
	spin_lock(&init_mm.page_table_lock);
	pmd_clear(pmd);
	spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud,
		struct vmem_altmap *altmap)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	/* free a pmd table */
	free_pagetable(pud_page(*pud), 0, altmap);
	spin_lock(&init_mm.page_table_lock);
	pud_clear(pud);
	spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d,
		struct vmem_altmap *altmap)
{
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		pud = pud_start + i;
		if (!pud_none(*pud))
			return;
	}

	/* free a pud table */
	free_pagetable(p4d_page(*p4d), 0, altmap);
	spin_lock(&init_mm.page_table_lock);
	p4d_clear(p4d);
	spin_unlock(&init_mm.page_table_lock);
}

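/*
 * The remove_*_table() helpers below tear down mappings level by level.
 * @direct says whether this is the direct mapping (where the page count is
 * updated and the mapped pages themselves are not freed) or a vmemmap range.
 */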
static void __meminit
remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
		 struct vmem_altmap *altmap, bool direct)
{
	unsigned long next, pages = 0;
	pte_t *pte;
	void *page_addr;
	phys_addr_t phys_addr;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		/*
		 * We mapped [0,1G) memory as an identity mapping when
		 * initializing, in arch/x86/kernel/head_64.S. These
		 * pagetables cannot be removed.
		 */
		phys_addr = pte_val(*pte) + (addr & PAGE_MASK);
		if (phys_addr < (phys_addr_t)0x40000000)
			return;

		if (PAGE_ALIGNED(addr) && PAGE_ALIGNED(next)) {
			/*
			 * Do not free direct mapping pages since they were
			 * freed when offlining, or simply not in use.
			 */
			if (!direct)
				free_pagetable(pte_page(*pte), 0, altmap);

			spin_lock(&init_mm.page_table_lock);
			pte_clear(&init_mm, addr, pte);
			spin_unlock(&init_mm.page_table_lock);

			/* For non-direct mapping, pages means nothing. */
			pages++;
		} else {
			/*
			 * If we are here, we are freeing vmemmap pages since
			 * direct mapped memory ranges to be freed are aligned.
			 *
			 * If we are not removing the whole page, it means
			 * other page structs in this page are being used and
			 * we cannot remove them. So fill the unused
			 * page_structs with 0xFD, and remove the page when
			 * it is wholly filled with 0xFD.
			 */
			memset((void *)addr, PAGE_INUSE, next - addr);

			page_addr = page_address(pte_page(*pte));
			if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
				free_pagetable(pte_page(*pte), 0, altmap);

				spin_lock(&init_mm.page_table_lock);
				pte_clear(&init_mm, addr, pte);
				spin_unlock(&init_mm.page_table_lock);
			}
		}
	}

	/* Call free_pte_table() in remove_pmd_table(). */
	flush_tlb_all();
	if (direct)
		update_page_count(PG_LEVEL_4K, -pages);
}

static void __meminit
remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
		 bool direct, struct vmem_altmap *altmap)
{
	unsigned long next, pages = 0;
	pte_t *pte_base;
	pmd_t *pmd;
	void *page_addr;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_large(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE)) {
				if (!direct)
					free_pagetable(pmd_page(*pmd),
						       get_order(PMD_SIZE),
						       altmap);

				spin_lock(&init_mm.page_table_lock);
				pmd_clear(pmd);
				spin_unlock(&init_mm.page_table_lock);
				pages++;
			} else {
				/* If here, we are freeing vmemmap pages. */
				memset((void *)addr, PAGE_INUSE, next - addr);

				page_addr = page_address(pmd_page(*pmd));
				if (!memchr_inv(page_addr, PAGE_INUSE,
						PMD_SIZE)) {
					free_pagetable(pmd_page(*pmd),
						       get_order(PMD_SIZE),
						       altmap);

					spin_lock(&init_mm.page_table_lock);
					pmd_clear(pmd);
					spin_unlock(&init_mm.page_table_lock);
				}
			}

			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next, altmap, direct);
		free_pte_table(pte_base, pmd, altmap);
	}

	/* Call free_pmd_table() in remove_pud_table(). */
	if (direct)
		update_page_count(PG_LEVEL_2M, -pages);
}

static void __meminit
remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
		 struct vmem_altmap *altmap, bool direct)
{
	unsigned long next, pages = 0;
	pmd_t *pmd_base;
	pud_t *pud;
	void *page_addr;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_large(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE)) {
				if (!direct)
					free_pagetable(pud_page(*pud),
						       get_order(PUD_SIZE),
						       altmap);

				spin_lock(&init_mm.page_table_lock);
				pud_clear(pud);
				spin_unlock(&init_mm.page_table_lock);
				pages++;
			} else {
				/* If here, we are freeing vmemmap pages. */
				memset((void *)addr, PAGE_INUSE, next - addr);

				page_addr = page_address(pud_page(*pud));
				if (!memchr_inv(page_addr, PAGE_INUSE,
						PUD_SIZE)) {
					free_pagetable(pud_page(*pud),
						       get_order(PUD_SIZE),
						       altmap);

					spin_lock(&init_mm.page_table_lock);
					pud_clear(pud);
					spin_unlock(&init_mm.page_table_lock);
				}
			}

			continue;
		}

		pmd_base = pmd_offset(pud, 0);
		remove_pmd_table(pmd_base, addr, next, direct, altmap);
		free_pmd_table(pmd_base, pud, altmap);
	}

	if (direct)
		update_page_count(PG_LEVEL_1G, -pages);
}

static void __meminit
remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
		 struct vmem_altmap *altmap, bool direct)
{
	unsigned long next, pages = 0;
	pud_t *pud_base;
	p4d_t *p4d;

	p4d = p4d_start + p4d_index(addr);
	for (; addr < end; addr = next, p4d++) {
		next = p4d_addr_end(addr, end);

		if (!p4d_present(*p4d))
			continue;

		BUILD_BUG_ON(p4d_large(*p4d));

		pud_base = pud_offset(p4d, 0);
		remove_pud_table(pud_base, addr, next, altmap, direct);
		/*
		 * For 4-level page tables we do not want to free PUDs, but in the
		 * 5-level case we should free them. This code will have to change
		 * to adapt for boot-time switching between 4 and 5 level page tables.
		 */
		if (pgtable_l5_enabled)
			free_pud_table(pud_base, p4d, altmap);
	}

	if (direct)
		update_page_count(PG_LEVEL_512G, -pages);
}

/* start and end are both virtual addresses. */
static void __meminit
remove_pagetable(unsigned long start, unsigned long end, bool direct,
		struct vmem_altmap *altmap)
{
	unsigned long next;
	unsigned long addr;
	pgd_t *pgd;
	p4d_t *p4d;

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		p4d = p4d_offset(pgd, 0);
		remove_p4d_table(p4d, addr, next, altmap, direct);
	}

	flush_tlb_all();
}

void __ref vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap)
{
	remove_pagetable(start, end, false, altmap);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static void __meminit
kernel_physical_mapping_remove(unsigned long start, unsigned long end)
{
	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	remove_pagetable(start, end, true, NULL);
}

int __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct page *page = pfn_to_page(start_pfn);
	struct zone *zone;
	int ret;

	/* With altmap the first mapped page is offset from @start */
	if (altmap)
		page += vmem_altmap_offset(altmap);
	zone = page_zone(page);
	ret = __remove_pages(zone, start_pfn, nr_pages, altmap);
	WARN_ON_ONCE(ret);
	kernel_physical_mapping_remove(start, start + size);

	return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */

static struct kcore_list kcore_vsyscall;

static void __init register_page_bootmem_info(void)
{
#ifdef CONFIG_NUMA
	int i;

	for_each_online_node(i)
		register_page_bootmem_info_node(NODE_DATA(i));
#endif
}

void __init mem_init(void)
{
	pci_iommu_alloc();

	/* clear_bss() already cleared the empty_zero_page */

	/* this will put all memory onto the freelists */
	free_all_bootmem();
	after_bootmem = 1;

	/*
	 * Must be done after boot memory is put on freelist, because here we
	 * might set fields in deferred struct pages that have not yet been
	 * initialized, and free_all_bootmem() initializes all the reserved
	 * deferred pages for us.
	 */
	register_page_bootmem_info();

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR,
			 PAGE_SIZE, KCORE_OTHER);

	mem_init_print_info(NULL);
}

int kernel_set_to_readonly;

void set_kernel_text_rw(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = PFN_ALIGN(__stop___ex_table);

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read write\n",
		 start, end);

	/*
	 * Make the kernel identity mapping for text RW. Kernel text
	 * mapping will always be RO. Refer to the comment in
	 * static_protections() in pageattr.c
	 */
	set_memory_rw(start, (end - start) >> PAGE_SHIFT);
}

void set_kernel_text_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = PFN_ALIGN(__stop___ex_table);

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read only\n",
		 start, end);

	/*
	 * Set the kernel identity mapping for text RO.
	 */
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long rodata_start = PFN_ALIGN(__start_rodata);
	unsigned long end = (unsigned long) &__end_rodata_hpage_align;
	unsigned long text_end = PFN_ALIGN(&__stop___ex_table);
	unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
	unsigned long all_end;

	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
	       (end - start) >> 10);
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);

	kernel_set_to_readonly = 1;

	/*
	 * The rodata/data/bss/brk section (but not the kernel text!)
	 * should also be not-executable.
	 *
	 * We align all_end to PMD_SIZE because the existing mapping
	 * is a full PMD. If we would align _brk_end to PAGE_SIZE we
	 * would split the PMD, and the remainder between _brk_end and
	 * the end of the PMD would remain mapped executable.
	 *
	 * Any PMD which was setup after the one which covers _brk_end
	 * has been zapped already via cleanup_highmap().
	 */
	all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
	set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
	set_memory_rw(start, (end-start) >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: again\n");
	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif

	free_init_pages("unused kernel",
			(unsigned long) __va(__pa_symbol(text_end)),
			(unsigned long) __va(__pa_symbol(rodata_start)));
	free_init_pages("unused kernel",
			(unsigned long) __va(__pa_symbol(rodata_end)),
			(unsigned long) __va(__pa_symbol(_sdata)));

	debug_checkwx();
}

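/*
 * Check that a kernel virtual address is canonical and backed by a present,
 * valid mapping at some level (1G, 2M or 4K).
 */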
int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return 0;

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_large(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}

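/*
 * Pick the memory block granularity used for hotplug: the minimum section
 * size by default, 2GB on UV systems or machines with 64GB of RAM or more.
 */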
static unsigned long probe_memory_block_size(void)
{
	unsigned long bz = MIN_MEMORY_BLOCK_SIZE;

	/* if system is UV or has 64GB of RAM or more, use large blocks */
	if (is_uv_system() || ((max_pfn << PAGE_SHIFT) >= (64UL << 30)))
		bz = 2UL << 30; /* 2GB */

	pr_info("x86/mm: Memory block size: %ldMB\n", bz >> 20);

	return bz;
}

static unsigned long memory_block_size_probed;
unsigned long memory_block_size_bytes(void)
{
	if (!memory_block_size_probed)
		memory_block_size_probed = probe_memory_block_size();

	return memory_block_size_probed;
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;

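/*
 * Populate the vmemmap with 2MB pages, taken from the altmap when one is
 * supplied, falling back to base pages when no 2MB block is available
 * (except for altmap allocations, which have no fallback).
 */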
static int __meminit vmemmap_populate_hugepages(unsigned long start,
		unsigned long end, int node, struct vmem_altmap *altmap)
{
	unsigned long addr;
	unsigned long next;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	for (addr = start; addr < end; addr = next) {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		p4d = vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;

		pud = vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p;

			if (altmap)
				p = altmap_alloc_block_buf(PMD_SIZE, altmap);
			else
				p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (p) {
				pte_t entry;

				entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
						PAGE_KERNEL_LARGE);
				set_pmd(pmd, __pmd(pte_val(entry)));

				/* check to see if we have contiguous blocks */
				if (p_end != p || node_start != node) {
					if (p_start)
						pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
						       addr_start, addr_end-1, p_start, p_end-1, node_start);
					addr_start = addr;
					node_start = node;
					p_start = p;
				}

				addr_end = addr + PMD_SIZE;
				p_end = p + PMD_SIZE;
				continue;
			} else if (altmap)
				return -ENOMEM; /* no fallback */
		} else if (pmd_large(*pmd)) {
			vmemmap_verify((pte_t *)pmd, node, addr, next);
			continue;
		}
		if (vmemmap_populate_basepages(addr, next, node))
			return -ENOMEM;
	}
	return 0;
}

int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap)
{
	int err;

	if (boot_cpu_has(X86_FEATURE_PSE))
		err = vmemmap_populate_hugepages(start, end, node, altmap);
	else if (altmap) {
		pr_err_once("%s: no cpu support for altmap allocations\n",
				__func__);
		err = -ENOMEM;
	} else
		err = vmemmap_populate_basepages(start, end, node);
	if (!err)
		sync_global_pgds(start, end - 1);
	return err;
}

#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE)
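/*
 * Record bootmem info for (and take a reference on) the page-table pages and
 * vmemmap pages backing the given memory section, so they are not freed
 * while that info is live.
 */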
void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long nr_pages)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + nr_pages);
	unsigned long next;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	unsigned int nr_pmd_pages;
	struct page *page;

	for (; addr < end; addr = next) {
		pte_t *pte = NULL;

		pgd = pgd_offset_k(addr);
		if (pgd_none(*pgd)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, pgd_page(*pgd), MIX_SECTION_INFO);

		p4d = p4d_offset(pgd, addr);
		if (p4d_none(*p4d)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, p4d_page(*p4d), MIX_SECTION_INFO);

		pud = pud_offset(p4d, addr);
		if (pud_none(*pud)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);

		if (!boot_cpu_has(X86_FEATURE_PSE)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd))
				continue;
			get_page_bootmem(section_nr, pmd_page(*pmd),
					 MIX_SECTION_INFO);

			pte = pte_offset_kernel(pmd, addr);
			if (pte_none(*pte))
				continue;
			get_page_bootmem(section_nr, pte_page(*pte),
					 SECTION_INFO);
		} else {
			next = pmd_addr_end(addr, end);

			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd))
				continue;

			nr_pmd_pages = 1 << get_order(PMD_SIZE);
			page = pmd_page(*pmd);
			while (nr_pmd_pages--)
				get_page_bootmem(section_nr, page++,
						 SECTION_INFO);
		}
	}
}
#endif

void __meminit vmemmap_populate_print_last(void)
{
	if (p_start) {
		pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
			addr_start, addr_end-1, p_start, p_end-1, node_start);
		p_start = NULL;
		p_end = NULL;
		node_start = 0;
	}
}
#endif