/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@ucw.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/memremap.h>
#include <linux/nmi.h>
#include <linux/gfp.h>
#include <linux/kcore.h>

#include <asm/processor.h>
#include <asm/bios_ebda.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820/api.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/set_memory.h>
#include <asm/init.h>
#include <asm/uv/uv.h>
#include <asm/setup.h>

#include "mm_internal.h"

#include "ident_map.c"

/*
 * NOTE: pagetable_init() allocates all the fixmap pagetables contiguously
 * in physical space so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */

/* Bits supported by the hardware: */
pteval_t __supported_pte_mask __read_mostly = ~0;
/* Bits allowed in normal kernel mappings: */
pteval_t __default_kernel_pte_mask __read_mostly = ~0;
EXPORT_SYMBOL_GPL(__supported_pte_mask);
/* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */
EXPORT_SYMBOL(__default_kernel_pte_mask);

int force_personality32;

/*
 * noexec32=on|off
 * Control non executable heap for 32bit processes.
 * To control the stack too use noexec=off
 *
 * on	PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 * off	PROT_READ implies PROT_EXEC
 */
static int __init nonx32_setup(char *str)
{
	if (!strcmp(str, "on"))
		force_personality32 &= ~READ_IMPLIES_EXEC;
	else if (!strcmp(str, "off"))
		force_personality32 |= READ_IMPLIES_EXEC;
	return 1;
}
__setup("noexec32=", nonx32_setup);

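/*
 * With 5-level paging the kernel PGD is the top level table, so a new
 * kernel PGD entry is copied from the reference page table (init_mm)
 * straight into every pgd on pgd_list.
 */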
static void sync_global_pgds_l5(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
		const pgd_t *pgd_ref = pgd_offset_k(addr);
		struct page *page;

		/* Check for overflow */
		if (addr < start)
			break;

		if (pgd_none(*pgd_ref))
			continue;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			spinlock_t *pgt_lock;

			pgd = (pgd_t *)page_address(page) + pgd_index(addr);
			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
			spin_lock(pgt_lock);

			if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));

			if (pgd_none(*pgd))
				set_pgd(pgd, *pgd_ref);

			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}

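/*
 * With 4-level paging the p4d level is folded into the pgd, so the
 * synchronization has to be done on the p4d entries instead.
 */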
static void sync_global_pgds_l4(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
		pgd_t *pgd_ref = pgd_offset_k(addr);
		const p4d_t *p4d_ref;
		struct page *page;

		/*
		 * With folded p4d, pgd_none() is always false, we need to
		 * handle synchronization at the p4d level.
		 */
		MAYBE_BUILD_BUG_ON(pgd_none(*pgd_ref));
		p4d_ref = p4d_offset(pgd_ref, addr);

		if (p4d_none(*p4d_ref))
			continue;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			p4d_t *p4d;
			spinlock_t *pgt_lock;

			pgd = (pgd_t *)page_address(page) + pgd_index(addr);
			p4d = p4d_offset(pgd, addr);
			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
			spin_lock(pgt_lock);

			if (!p4d_none(*p4d_ref) && !p4d_none(*p4d))
				BUG_ON(p4d_page_vaddr(*p4d)
				       != p4d_page_vaddr(*p4d_ref));

			if (p4d_none(*p4d))
				set_p4d(p4d, *p4d_ref);

			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}

/*
 * When memory is added, make sure all process MMs have suitable PGD
 * entries in the local PGD level page.
 */
void sync_global_pgds(unsigned long start, unsigned long end)
{
	if (pgtable_l5_enabled())
		sync_global_pgds_l5(start, end);
	else
		sync_global_pgds_l4(start, end);
}

/*
 * NOTE: This function is marked __ref because it calls the __init function
 * (memblock_alloc()). It's safe to do it ONLY when after_bootmem == 0.
 */
static __ref void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
		panic("set_pte_phys: cannot allocate page data %s\n",
			after_bootmem ? "after bootmem" : "");
	}

	pr_debug("spp_getpage %p\n", ptr);

	return ptr;
}

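/*
 * The fill_*() helpers below populate a missing intermediate page table
 * level on demand, allocating the new table from spp_getpage(), and return
 * the entry for @vaddr at the next lower level.
 */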
static p4d_t *fill_p4d(pgd_t *pgd, unsigned long vaddr)
{
	if (pgd_none(*pgd)) {
		p4d_t *p4d = (p4d_t *)spp_getpage();
		pgd_populate(&init_mm, pgd, p4d);
		if (p4d != p4d_offset(pgd, 0))
			printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
			       p4d, p4d_offset(pgd, 0));
	}
	return p4d_offset(pgd, vaddr);
}

static pud_t *fill_pud(p4d_t *p4d, unsigned long vaddr)
{
	if (p4d_none(*p4d)) {
		pud_t *pud = (pud_t *)spp_getpage();
		p4d_populate(&init_mm, p4d, pud);
		if (pud != pud_offset(p4d, 0))
			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
			       pud, pud_offset(p4d, 0));
	}
	return pud_offset(p4d, vaddr);
}

static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
{
	if (pud_none(*pud)) {
		pmd_t *pmd = (pmd_t *) spp_getpage();
		pud_populate(&init_mm, pud, pmd);
		if (pmd != pmd_offset(pud, 0))
			printk(KERN_ERR "PAGETABLE BUG #02! %p <-> %p\n",
			       pmd, pmd_offset(pud, 0));
	}
	return pmd_offset(pud, vaddr);
}

static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = (pte_t *) spp_getpage();
		pmd_populate_kernel(&init_mm, pmd, pte);
		if (pte != pte_offset_kernel(pmd, 0))
			printk(KERN_ERR "PAGETABLE BUG #03!\n");
	}
	return pte_offset_kernel(pmd, vaddr);
}

static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte)
{
	pmd_t *pmd = fill_pmd(pud, vaddr);
	pte_t *pte = fill_pte(pmd, vaddr);

	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one_kernel(vaddr);
}

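/*
 * Install a kernel PTE for @vaddr below the given top level table, filling
 * any missing intermediate levels on the way down.
 */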
void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte)
{
	p4d_t *p4d = p4d_page + p4d_index(vaddr);
	pud_t *pud = fill_pud(p4d, vaddr);

	__set_pte_vaddr(pud, vaddr, new_pte);
}

void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
{
	pud_t *pud = pud_page + pud_index(vaddr);

	__set_pte_vaddr(pud, vaddr, new_pte);
}

void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
	pgd_t *pgd;
	p4d_t *p4d_page;

	pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_ERR
			"PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}

	p4d_page = p4d_offset(pgd, 0);
	set_pte_vaddr_p4d(p4d_page, vaddr, pteval);
}

pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset_k(vaddr);
	p4d = fill_p4d(pgd, vaddr);
	pud = fill_pud(p4d, vaddr);
	return fill_pmd(pud, vaddr);
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
	pmd_t *pmd;

	pmd = populate_extra_pmd(vaddr);
	return fill_pte(pmd, vaddr);
}

/*
 * Create large page table mappings for a range of physical addresses.
 */
static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
					enum page_cache_mode cache)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pgprot_t prot;

	pgprot_val(prot) = pgprot_val(PAGE_KERNEL_LARGE) |
		pgprot_val(pgprot_4k_2_large(cachemode2pgprot(cache)));
	BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
	for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
		pgd = pgd_offset_k((unsigned long)__va(phys));
		if (pgd_none(*pgd)) {
			p4d = (p4d_t *) spp_getpage();
			set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		p4d = p4d_offset(pgd, (unsigned long)__va(phys));
		if (p4d_none(*p4d)) {
			pud = (pud_t *) spp_getpage();
			set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		pud = pud_offset(p4d, (unsigned long)__va(phys));
		if (pud_none(*pud)) {
			pmd = (pmd_t *) spp_getpage();
			set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		pmd = pmd_offset(pud, phys);
		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
	}
}

void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_WB);
}

void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_UC);
}

/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
 *
 * phys_base holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _brk_end.  _brk_end
 * is rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
	unsigned long vaddr = __START_KERNEL_map;
	unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE;
	unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
	pmd_t *pmd = level2_kernel_pgt;

	/*
	 * Native path, max_pfn_mapped is not set yet.
	 * Xen has valid max_pfn_mapped set in
	 *	arch/x86/xen/mmu.c:xen_setup_kernel_pagetable().
	 */
	if (max_pfn_mapped)
		vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);

	for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
		if (pmd_none(*pmd))
			continue;
		if (vaddr < (unsigned long) _text || vaddr > end)
			set_pmd(pmd, __pmd(0));
	}
}

/*
 * Create PTE level page table mapping for physical addresses.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
	      pgprot_t prot)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;
	pte_t *pte;
	int i;

	pte = pte_page + pte_index(paddr);
	i = pte_index(paddr);

	for (; i < PTRS_PER_PTE; i++, paddr = paddr_next, pte++) {
		paddr_next = (paddr & PAGE_MASK) + PAGE_SIZE;
		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
					     E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
					     E820_TYPE_RESERVED_KERN))
				set_pte_safe(pte, __pte(0));
			continue;
		}

		/*
		 * We will re-use the existing mapping.
		 * Xen for example has some special requirements, like mapping
		 * pagetable pages as RO. So assume that whoever pre-set up
		 * these mappings knew what they were doing.
		 */
		if (!pte_none(*pte)) {
			if (!after_bootmem)
				pages++;
			continue;
		}

		if (0)
			pr_info("   pte=%p addr=%lx pte=%016lx\n", pte, paddr,
				pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL).pte);
		pages++;
		set_pte_safe(pte, pfn_pte(paddr >> PAGE_SHIFT, prot));
		paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE;
	}

	update_page_count(PG_LEVEL_4K, pages);

	return paddr_last;
}

/*
 * Create PMD level page table mapping for physical addresses. The virtual
 * and physical addresses have to be aligned at this level.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask, pgprot_t prot)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;

	int i = pmd_index(paddr);

	for (; i < PTRS_PER_PMD; i++, paddr = paddr_next) {
		pmd_t *pmd = pmd_page + pmd_index(paddr);
		pte_t *pte;
		pgprot_t new_prot = prot;

		paddr_next = (paddr & PMD_MASK) + PMD_SIZE;
		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PMD_MASK, paddr_next,
					     E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & PMD_MASK, paddr_next,
					     E820_TYPE_RESERVED_KERN))
				set_pmd_safe(pmd, __pmd(0));
			continue;
		}

		if (!pmd_none(*pmd)) {
			if (!pmd_large(*pmd)) {
				spin_lock(&init_mm.page_table_lock);
				pte = (pte_t *)pmd_page_vaddr(*pmd);
				paddr_last = phys_pte_init(pte, paddr,
							   paddr_end, prot);
				spin_unlock(&init_mm.page_table_lock);
				continue;
			}
			/*
			 * If we are ok with PG_LEVEL_2M mapping, then we will
			 * use the existing mapping,
			 *
			 * Otherwise, we will split the large page mapping but
			 * use the same existing protection bits except for
			 * large page, so that we don't violate Intel's TLB
			 * Application note (317080) which says, while changing
			 * the page sizes, new and old translations should
			 * not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_2M)) {
				if (!after_bootmem)
					pages++;
				paddr_last = paddr_next;
				continue;
			}
			new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
		}

		if (page_size_mask & (1<<PG_LEVEL_2M)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pte_safe((pte_t *)pmd,
				pfn_pte((paddr & PMD_MASK) >> PAGE_SHIFT,
					__pgprot(pgprot_val(prot) | _PAGE_PSE)));
			spin_unlock(&init_mm.page_table_lock);
			paddr_last = paddr_next;
			continue;
		}

		pte = alloc_low_page();
		paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot);

		spin_lock(&init_mm.page_table_lock);
		pmd_populate_kernel_safe(&init_mm, pmd, pte);
		spin_unlock(&init_mm.page_table_lock);
	}
	update_page_count(PG_LEVEL_2M, pages);
	return paddr_last;
}

/*
 * Create PUD level page table mapping for physical addresses. The virtual
 * and physical addresses do not have to be aligned at this level. KASLR can
 * randomize virtual addresses up to this level.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;
	unsigned long vaddr = (unsigned long)__va(paddr);
	int i = pud_index(vaddr);

	for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) {
		pud_t *pud;
		pmd_t *pmd;
		pgprot_t prot = PAGE_KERNEL;

		vaddr = (unsigned long)__va(paddr);
		pud = pud_page + pud_index(vaddr);
		paddr_next = (paddr & PUD_MASK) + PUD_SIZE;

		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PUD_MASK, paddr_next,
					     E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & PUD_MASK, paddr_next,
					     E820_TYPE_RESERVED_KERN))
				set_pud_safe(pud, __pud(0));
			continue;
		}

		if (!pud_none(*pud)) {
			if (!pud_large(*pud)) {
				pmd = pmd_offset(pud, 0);
				paddr_last = phys_pmd_init(pmd, paddr,
							   paddr_end,
							   page_size_mask,
							   prot);
				continue;
			}
			/*
			 * If we are ok with PG_LEVEL_1G mapping, then we will
			 * use the existing mapping.
			 *
			 * Otherwise, we will split the gbpage mapping but use
			 * the same existing protection bits except for large
			 * page, so that we don't violate Intel's TLB
			 * Application note (317080) which says, while changing
			 * the page sizes, new and old translations should
			 * not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_1G)) {
				if (!after_bootmem)
					pages++;
				paddr_last = paddr_next;
				continue;
			}
			prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
		}

		if (page_size_mask & (1<<PG_LEVEL_1G)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pte_safe((pte_t *)pud,
				pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
					PAGE_KERNEL_LARGE));
			spin_unlock(&init_mm.page_table_lock);
			paddr_last = paddr_next;
			continue;
		}

		pmd = alloc_low_page();
		paddr_last = phys_pmd_init(pmd, paddr, paddr_end,
					   page_size_mask, prot);

		spin_lock(&init_mm.page_table_lock);
		pud_populate_safe(&init_mm, pud, pmd);
		spin_unlock(&init_mm.page_table_lock);
	}

	update_page_count(PG_LEVEL_1G, pages);

	return paddr_last;
}

static unsigned long __meminit
phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask)
{
	unsigned long paddr_next, paddr_last = paddr_end;
	unsigned long vaddr = (unsigned long)__va(paddr);
	int i = p4d_index(vaddr);

	if (!pgtable_l5_enabled())
		return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end, page_size_mask);

	for (; i < PTRS_PER_P4D; i++, paddr = paddr_next) {
		p4d_t *p4d;
		pud_t *pud;

		vaddr = (unsigned long)__va(paddr);
		p4d = p4d_page + p4d_index(vaddr);
		paddr_next = (paddr & P4D_MASK) + P4D_SIZE;

		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & P4D_MASK, paddr_next,
					     E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & P4D_MASK, paddr_next,
					     E820_TYPE_RESERVED_KERN))
				set_p4d_safe(p4d, __p4d(0));
			continue;
		}

		if (!p4d_none(*p4d)) {
			pud = pud_offset(p4d, 0);
			paddr_last = phys_pud_init(pud, paddr,
					paddr_end,
					page_size_mask);
			continue;
		}

		pud = alloc_low_page();
		paddr_last = phys_pud_init(pud, paddr, paddr_end,
					   page_size_mask);

		spin_lock(&init_mm.page_table_lock);
		p4d_populate_safe(&init_mm, p4d, pud);
		spin_unlock(&init_mm.page_table_lock);
	}

	return paddr_last;
}

/*
 * Create page table mapping for the physical memory for specific physical
 * addresses. The virtual and physical addresses have to be aligned on PMD level
 * down. It returns the last physical address mapped.
 */
unsigned long __meminit
kernel_physical_mapping_init(unsigned long paddr_start,
			     unsigned long paddr_end,
			     unsigned long page_size_mask)
{
	bool pgd_changed = false;
	unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;

	paddr_last = paddr_end;
	vaddr = (unsigned long)__va(paddr_start);
	vaddr_end = (unsigned long)__va(paddr_end);
	vaddr_start = vaddr;

	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
		pgd_t *pgd = pgd_offset_k(vaddr);
		p4d_t *p4d;

		vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE;

		if (pgd_val(*pgd)) {
			p4d = (p4d_t *)pgd_page_vaddr(*pgd);
			paddr_last = phys_p4d_init(p4d, __pa(vaddr),
						   __pa(vaddr_end),
						   page_size_mask);
			continue;
		}

		p4d = alloc_low_page();
		paddr_last = phys_p4d_init(p4d, __pa(vaddr), __pa(vaddr_end),
					   page_size_mask);

		spin_lock(&init_mm.page_table_lock);
		if (pgtable_l5_enabled())
			pgd_populate_safe(&init_mm, pgd, p4d);
		else
			p4d_populate_safe(&init_mm, p4d_offset(pgd, vaddr), (pud_t *) p4d);
		spin_unlock(&init_mm.page_table_lock);
		pgd_changed = true;
	}

	if (pgd_changed)
		sync_global_pgds(vaddr_start, vaddr_end - 1);

	return paddr_last;
}

#ifndef CONFIG_NUMA
void __init initmem_init(void)
{
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}
#endif

void __init paging_init(void)
{
	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

	/*
	 * clear the default setting with node 0
	 * note: don't use nodes_clear here, that is really clearing when
	 *	 numa support is not compiled in, and later node_set_state
	 *	 will not set it back.
	 */
	node_clear_state(0, N_MEMORY);
	if (N_MEMORY != N_NORMAL_MEMORY)
		node_clear_state(0, N_NORMAL_MEMORY);

	zone_sizes_init();
}

/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
 * updating.
 */
static void update_end_of_memory_vars(u64 start, u64 size)
{
	unsigned long end_pfn = PFN_UP(start + size);

	if (end_pfn > max_pfn) {
		max_pfn = end_pfn;
		max_low_pfn = end_pfn;
		high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
	}
}

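/*
 * Add a range of pages to the given node and then update max_pfn,
 * max_low_pfn and high_memory to cover the newly added range.
 */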
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
				struct mhp_restrictions *restrictions)
{
	int ret;

	ret = __add_pages(nid, start_pfn, nr_pages, restrictions);
	WARN_ON_ONCE(ret);

	/* update max_pfn, max_low_pfn and high_memory */
	update_end_of_memory_vars(start_pfn << PAGE_SHIFT,
				  nr_pages << PAGE_SHIFT);

	return ret;
}

int arch_add_memory(int nid, u64 start, u64 size,
			struct mhp_restrictions *restrictions)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	init_memory_mapping(start, start + size);

	return add_pages(nid, start_pfn, nr_pages, restrictions);
}

#define PAGE_INUSE 0xFD

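/*
 * Free a page table page. Bootmem/memblock pages still carry PageReserved
 * and go back via the bootmem info code; everything else is returned to
 * the page allocator.
 */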
static void __meminit free_pagetable(struct page *page, int order)
{
	unsigned long magic;
	unsigned int nr_pages = 1 << order;

	/* bootmem page has reserved flag */
	if (PageReserved(page)) {
		__ClearPageReserved(page);

		magic = (unsigned long)page->freelist;
		if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
			while (nr_pages--)
				put_page_bootmem(page++);
		} else
			while (nr_pages--)
				free_reserved_page(page++);
	} else
		free_pages((unsigned long)page_address(page), order);
}

static void __meminit free_hugepage_table(struct page *page,
		struct vmem_altmap *altmap)
{
	if (altmap)
		vmem_altmap_free(altmap, PMD_SIZE / PAGE_SIZE);
	else
		free_pagetable(page, get_order(PMD_SIZE));
}

static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	/* free a pte table */
	free_pagetable(pmd_page(*pmd), 0);
	spin_lock(&init_mm.page_table_lock);
	pmd_clear(pmd);
	spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	/* free a pmd table */
	free_pagetable(pud_page(*pud), 0);
	spin_lock(&init_mm.page_table_lock);
	pud_clear(pud);
	spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
{
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		pud = pud_start + i;
		if (!pud_none(*pud))
			return;
	}

	/* free a pud table */
	free_pagetable(p4d_page(*p4d), 0);
	spin_lock(&init_mm.page_table_lock);
	p4d_clear(p4d);
	spin_unlock(&init_mm.page_table_lock);
}

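/*
 * Unmap and free the PTE level for [addr, end). Pages backing the direct
 * mapping are left alone; vmemmap backing pages are freed once a whole
 * page is no longer in use.
 */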
static void __meminit
remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
		 bool direct)
{
	unsigned long next, pages = 0;
	pte_t *pte;
	void *page_addr;
	phys_addr_t phys_addr;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		/*
		 * We mapped [0,1G) memory as identity mapping when
		 * initializing, in arch/x86/kernel/head_64.S. These
		 * pagetables cannot be removed.
		 */
		phys_addr = pte_val(*pte) + (addr & PAGE_MASK);
		if (phys_addr < (phys_addr_t)0x40000000)
			return;

		if (PAGE_ALIGNED(addr) && PAGE_ALIGNED(next)) {
			/*
			 * Do not free direct mapping pages since they were
			 * freed when offlining, or simply not in use.
			 */
			if (!direct)
				free_pagetable(pte_page(*pte), 0);

			spin_lock(&init_mm.page_table_lock);
			pte_clear(&init_mm, addr, pte);
			spin_unlock(&init_mm.page_table_lock);

			/* For non-direct mapping, pages means nothing. */
			pages++;
		} else {
			/*
			 * If we are here, we are freeing vmemmap pages since
			 * direct mapped memory ranges to be freed are aligned.
			 *
			 * If we are not removing the whole page, it means
			 * other page structs in this page are being used and
			 * we cannot remove them. So fill the unused page_structs
			 * with 0xFD, and remove the page when it is wholly
			 * filled with 0xFD.
			 */
			memset((void *)addr, PAGE_INUSE, next - addr);

			page_addr = page_address(pte_page(*pte));
			if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
				free_pagetable(pte_page(*pte), 0);

				spin_lock(&init_mm.page_table_lock);
				pte_clear(&init_mm, addr, pte);
				spin_unlock(&init_mm.page_table_lock);
			}
		}
	}

	/* Call free_pte_table() in remove_pmd_table(). */
	flush_tlb_all();
	if (direct)
		update_page_count(PG_LEVEL_4K, -pages);
}

static void __meminit
remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
		 bool direct, struct vmem_altmap *altmap)
{
	unsigned long next, pages = 0;
	pte_t *pte_base;
	pmd_t *pmd;
	void *page_addr;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_large(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE)) {
				if (!direct)
					free_hugepage_table(pmd_page(*pmd),
							    altmap);

				spin_lock(&init_mm.page_table_lock);
				pmd_clear(pmd);
				spin_unlock(&init_mm.page_table_lock);
				pages++;
			} else {
				/* If here, we are freeing vmemmap pages. */
				memset((void *)addr, PAGE_INUSE, next - addr);

				page_addr = page_address(pmd_page(*pmd));
				if (!memchr_inv(page_addr, PAGE_INUSE,
						PMD_SIZE)) {
					free_hugepage_table(pmd_page(*pmd),
							    altmap);

					spin_lock(&init_mm.page_table_lock);
					pmd_clear(pmd);
					spin_unlock(&init_mm.page_table_lock);
				}
			}

			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next, direct);
		free_pte_table(pte_base, pmd);
	}

	/* Call free_pmd_table() in remove_pud_table(). */
	if (direct)
		update_page_count(PG_LEVEL_2M, -pages);
}

static void __meminit
remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
		 struct vmem_altmap *altmap, bool direct)
{
	unsigned long next, pages = 0;
	pmd_t *pmd_base;
	pud_t *pud;
	void *page_addr;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_large(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE)) {
				if (!direct)
					free_pagetable(pud_page(*pud),
						       get_order(PUD_SIZE));

				spin_lock(&init_mm.page_table_lock);
				pud_clear(pud);
				spin_unlock(&init_mm.page_table_lock);
				pages++;
			} else {
				/* If here, we are freeing vmemmap pages. */
				memset((void *)addr, PAGE_INUSE, next - addr);

				page_addr = page_address(pud_page(*pud));
				if (!memchr_inv(page_addr, PAGE_INUSE,
						PUD_SIZE)) {
					free_pagetable(pud_page(*pud),
						       get_order(PUD_SIZE));

					spin_lock(&init_mm.page_table_lock);
					pud_clear(pud);
					spin_unlock(&init_mm.page_table_lock);
				}
			}

			continue;
		}

		pmd_base = pmd_offset(pud, 0);
		remove_pmd_table(pmd_base, addr, next, direct, altmap);
		free_pmd_table(pmd_base, pud);
	}

	if (direct)
		update_page_count(PG_LEVEL_1G, -pages);
}

static void __meminit
remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
		 struct vmem_altmap *altmap, bool direct)
{
	unsigned long next, pages = 0;
	pud_t *pud_base;
	p4d_t *p4d;

	p4d = p4d_start + p4d_index(addr);
	for (; addr < end; addr = next, p4d++) {
		next = p4d_addr_end(addr, end);

		if (!p4d_present(*p4d))
			continue;

		BUILD_BUG_ON(p4d_large(*p4d));

		pud_base = pud_offset(p4d, 0);
		remove_pud_table(pud_base, addr, next, altmap, direct);
		/*
		 * For 4-level page tables we do not want to free PUDs, but in the
		 * 5-level case we should free them. This code will have to change
		 * to adapt for boot-time switching between 4 and 5 level page tables.
		 */
		if (pgtable_l5_enabled())
			free_pud_table(pud_base, p4d);
	}

	if (direct)
		update_page_count(PG_LEVEL_512G, -pages);
}

/* start and end are both virtual addresses. */
static void __meminit
remove_pagetable(unsigned long start, unsigned long end, bool direct,
		struct vmem_altmap *altmap)
{
	unsigned long next;
	unsigned long addr;
	pgd_t *pgd;
	p4d_t *p4d;

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		p4d = p4d_offset(pgd, 0);
		remove_p4d_table(p4d, addr, next, altmap, direct);
	}

	flush_tlb_all();
}

void __ref vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap)
{
	remove_pagetable(start, end, false, altmap);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static void __meminit
kernel_physical_mapping_remove(unsigned long start, unsigned long end)
{
	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	remove_pagetable(start, end, true, NULL);
}

int __ref arch_remove_memory(int nid, u64 start, u64 size,
				struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct page *page = pfn_to_page(start_pfn);
	struct zone *zone;
	int ret;

	/* With altmap the first mapped page is offset from @start */
	if (altmap)
		page += vmem_altmap_offset(altmap);
	zone = page_zone(page);
	ret = __remove_pages(zone, start_pfn, nr_pages, altmap);
	WARN_ON_ONCE(ret);
	kernel_physical_mapping_remove(start, start + size);

	return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */

static struct kcore_list kcore_vsyscall;

static void __init register_page_bootmem_info(void)
{
#ifdef CONFIG_NUMA
	int i;

	for_each_online_node(i)
		register_page_bootmem_info_node(NODE_DATA(i));
#endif
}

void __init mem_init(void)
{
	pci_iommu_alloc();

	/* clear_bss() already cleared the empty_zero_page */

	/* this will put all memory onto the freelists */
	memblock_free_all();
	after_bootmem = 1;
	x86_init.hyper.init_after_bootmem();

	/*
	 * Must be done after boot memory is put on freelist, because here we
	 * might set fields in deferred struct pages that have not yet been
	 * initialized, and memblock_free_all() initializes all the reserved
	 * deferred pages for us.
	 */
	register_page_bootmem_info();

	/* Register memory areas for /proc/kcore */
	if (get_gate_vma(&init_mm))
		kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER);

	mem_init_print_info(NULL);
}

int kernel_set_to_readonly;

void set_kernel_text_rw(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = PFN_ALIGN(__stop___ex_table);

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read write\n",
		 start, end);

	/*
	 * Make the kernel identity mapping for text RW. Kernel text
	 * mapping will always be RO. Refer to the comment in
	 * static_protections() in pageattr.c
	 */
	set_memory_rw(start, (end - start) >> PAGE_SHIFT);
}

void set_kernel_text_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = PFN_ALIGN(__stop___ex_table);

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read only\n",
		 start, end);

	/*
	 * Set the kernel identity mapping for text RO.
	 */
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long rodata_start = PFN_ALIGN(__start_rodata);
	unsigned long end = (unsigned long) &__end_rodata_hpage_align;
	unsigned long text_end = PFN_ALIGN(&__stop___ex_table);
	unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
	unsigned long all_end;

	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
	       (end - start) >> 10);
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);

	kernel_set_to_readonly = 1;

	/*
	 * The rodata/data/bss/brk section (but not the kernel text!)
	 * should also be not-executable.
	 *
	 * We align all_end to PMD_SIZE because the existing mapping
	 * is a full PMD. If we would align _brk_end to PAGE_SIZE we
	 * split the PMD and the remainder between _brk_end and the end
	 * of the PMD will remain mapped executable.
	 *
	 * Any PMD which was setup after the one which covers _brk_end
	 * has been zapped already via cleanup_highmap().
	 */
	all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
	set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
	set_memory_rw(start, (end-start) >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: again\n");
	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif

	free_kernel_image_pages((void *)text_end, (void *)rodata_start);
	free_kernel_image_pages((void *)rodata_end, (void *)_sdata);

	debug_checkwx();
}

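/*
 * Walk the kernel page tables and report whether @addr is backed by a
 * valid page frame.
 */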
int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return 0;

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_large(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}

/*
 * Block size is the minimum amount of memory which can be hotplugged or
 * hotremoved. It must be a power of two and must be equal to or larger than
 * MIN_MEMORY_BLOCK_SIZE.
 */
#define MAX_BLOCK_SIZE (2UL << 30)

/* Amount of ram needed to start using large blocks */
#define MEM_SIZE_FOR_LARGE_BLOCK (64UL << 30)

/* Adjustable memory block size */
static unsigned long set_memory_block_size;
int __init set_memory_block_size_order(unsigned int order)
{
	unsigned long size = 1UL << order;

	if (size > MEM_SIZE_FOR_LARGE_BLOCK || size < MIN_MEMORY_BLOCK_SIZE)
		return -EINVAL;

	set_memory_block_size = size;
	return 0;
}

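/*
 * Pick the memory block size used for hotplug: the size set via
 * set_memory_block_size_order() if any, MIN_MEMORY_BLOCK_SIZE on small
 * systems, otherwise the largest size up to MAX_BLOCK_SIZE that aligns
 * with the end of boot memory.
 */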
static unsigned long probe_memory_block_size(void)
{
	unsigned long boot_mem_end = max_pfn << PAGE_SHIFT;
	unsigned long bz;

	/* If memory block size has been set, then use it */
	bz = set_memory_block_size;
	if (bz)
		goto done;

	/* Use regular block if RAM is smaller than MEM_SIZE_FOR_LARGE_BLOCK */
	if (boot_mem_end < MEM_SIZE_FOR_LARGE_BLOCK) {
		bz = MIN_MEMORY_BLOCK_SIZE;
		goto done;
	}

	/* Find the largest allowed block size that aligns to memory end */
	for (bz = MAX_BLOCK_SIZE; bz > MIN_MEMORY_BLOCK_SIZE; bz >>= 1) {
		if (IS_ALIGNED(boot_mem_end, bz))
			break;
	}
done:
	pr_info("x86/mm: Memory block size: %ldMB\n", bz >> 20);

	return bz;
}

static unsigned long memory_block_size_probed;
unsigned long memory_block_size_bytes(void)
{
	if (!memory_block_size_probed)
		memory_block_size_probed = probe_memory_block_size();

	return memory_block_size_probed;
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;

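/*
 * Populate the vmemmap with 2MB pages where possible. Allocations come
 * from the altmap when one is supplied, otherwise from node local memory,
 * and ranges that cannot get a PMD sized block fall back to base pages.
 */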
static int __meminit vmemmap_populate_hugepages(unsigned long start,
		unsigned long end, int node, struct vmem_altmap *altmap)
{
	unsigned long addr;
	unsigned long next;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	for (addr = start; addr < end; addr = next) {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		p4d = vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;

		pud = vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p;

			if (altmap)
				p = altmap_alloc_block_buf(PMD_SIZE, altmap);
			else
				p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (p) {
				pte_t entry;

				entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
						PAGE_KERNEL_LARGE);
				set_pmd(pmd, __pmd(pte_val(entry)));

				/* check to see if we have contiguous blocks */
				if (p_end != p || node_start != node) {
					if (p_start)
						pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
						       addr_start, addr_end-1, p_start, p_end-1, node_start);
					addr_start = addr;
					node_start = node;
					p_start = p;
				}

				addr_end = addr + PMD_SIZE;
				p_end = p + PMD_SIZE;
				continue;
			} else if (altmap)
				return -ENOMEM; /* no fallback */
		} else if (pmd_large(*pmd)) {
			vmemmap_verify((pte_t *)pmd, node, addr, next);
			continue;
		}
		if (vmemmap_populate_basepages(addr, next, node))
			return -ENOMEM;
	}
	return 0;
}

int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap)
{
	int err;

	if (boot_cpu_has(X86_FEATURE_PSE))
		err = vmemmap_populate_hugepages(start, end, node, altmap);
	else if (altmap) {
		pr_err_once("%s: no cpu support for altmap allocations\n",
				__func__);
		err = -ENOMEM;
	} else
		err = vmemmap_populate_basepages(start, end, node);
	if (!err)
		sync_global_pgds(start, end - 1);
	return err;
}

#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE)
void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long nr_pages)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + nr_pages);
	unsigned long next;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	unsigned int nr_pmd_pages;
	struct page *page;

	for (; addr < end; addr = next) {
		pte_t *pte = NULL;

		pgd = pgd_offset_k(addr);
		if (pgd_none(*pgd)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, pgd_page(*pgd), MIX_SECTION_INFO);

		p4d = p4d_offset(pgd, addr);
		if (p4d_none(*p4d)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, p4d_page(*p4d), MIX_SECTION_INFO);

		pud = pud_offset(p4d, addr);
		if (pud_none(*pud)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);

		if (!boot_cpu_has(X86_FEATURE_PSE)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd))
				continue;
			get_page_bootmem(section_nr, pmd_page(*pmd),
					 MIX_SECTION_INFO);

			pte = pte_offset_kernel(pmd, addr);
			if (pte_none(*pte))
				continue;
			get_page_bootmem(section_nr, pte_page(*pte),
					 SECTION_INFO);
		} else {
			next = pmd_addr_end(addr, end);

			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd))
				continue;

			nr_pmd_pages = 1 << get_order(PMD_SIZE);
			page = pmd_page(*pmd);
			while (nr_pmd_pages--)
				get_page_bootmem(section_nr, page++,
						 SECTION_INFO);
		}
	}
}
#endif

void __meminit vmemmap_populate_print_last(void)
{
	if (p_start) {
		pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
			addr_start, addr_end-1, p_start, p_end-1, node_start);
		p_start = NULL;
		p_end = NULL;
		node_start = 0;
	}
}
#endif