/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@ucw.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/memremap.h>
#include <linux/nmi.h>
#include <linux/gfp.h>
#include <linux/kcore.h>

#include <asm/processor.h>
#include <asm/bios_ebda.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820/api.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/set_memory.h>
#include <asm/init.h>
#include <asm/uv/uv.h>
#include <asm/setup.h>

#include "mm_internal.h"

#include "ident_map.c"

/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously in
 * physical space so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */

pteval_t __supported_pte_mask __read_mostly = ~0;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

int force_personality32;

/*
 * noexec32=on|off
 * Control the non-executable heap for 32-bit processes.
 * To control the stack too, use noexec=off
 *
 * on	PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 * off	PROT_READ implies PROT_EXEC
 */
static int __init nonx32_setup(char *str)
{
	if (!strcmp(str, "on"))
		force_personality32 &= ~READ_IMPLIES_EXEC;
	else if (!strcmp(str, "off"))
		force_personality32 |= READ_IMPLIES_EXEC;
	return 1;
}
__setup("noexec32=", nonx32_setup);
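/*
 * Usage sketch (not part of the original file): booting with "noexec32=off"
 * sets READ_IMPLIES_EXEC in force_personality32, so in a 32-bit task
 *
 *	void *p = mmap(NULL, 4096, PROT_READ,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 * yields a mapping that is executable as well; with the default
 * "noexec32=on" it stays non-executable.
 */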

/*
 * When memory is added, make sure all the processes' MMs have
 * suitable PGD entries in the local PGD level page.
 */
#ifdef CONFIG_X86_5LEVEL
void sync_global_pgds(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
		const pgd_t *pgd_ref = pgd_offset_k(addr);
		struct page *page;

		/* Check for overflow */
		if (addr < start)
			break;

		if (pgd_none(*pgd_ref))
			continue;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			spinlock_t *pgt_lock;

			pgd = (pgd_t *)page_address(page) + pgd_index(addr);
			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
			spin_lock(pgt_lock);

			if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));

			if (pgd_none(*pgd))
				set_pgd(pgd, *pgd_ref);

			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}
#else
void sync_global_pgds(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
		pgd_t *pgd_ref = pgd_offset_k(addr);
		const p4d_t *p4d_ref;
		struct page *page;

		/*
		 * With folded p4d, pgd_none() is always false, we need to
		 * handle synchronization on the p4d level.
		 */
		BUILD_BUG_ON(pgd_none(*pgd_ref));
		p4d_ref = p4d_offset(pgd_ref, addr);

		if (p4d_none(*p4d_ref))
			continue;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			p4d_t *p4d;
			spinlock_t *pgt_lock;

			pgd = (pgd_t *)page_address(page) + pgd_index(addr);
			p4d = p4d_offset(pgd, addr);
			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
			spin_lock(pgt_lock);

			if (!p4d_none(*p4d_ref) && !p4d_none(*p4d))
				BUG_ON(p4d_page_vaddr(*p4d)
				       != p4d_page_vaddr(*p4d_ref));

			if (p4d_none(*p4d))
				set_p4d(p4d, *p4d_ref);

			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}
#endif
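/*
 * Illustration (not in the original source): callers in this file pass an
 * inclusive end address, e.g. after extending the direct mapping:
 *
 *	sync_global_pgds(vaddr_start, vaddr_end - 1);
 *
 * which propagates the new kernel PGD/P4D entries to every mm on pgd_list.
 */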

/*
 * NOTE: This function is marked __ref because it calls __init function
 * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.
 */
static __ref void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);

	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
		panic("set_pte_phys: cannot allocate page data %s\n",
			after_bootmem ? "after bootmem" : "");
	}

	pr_debug("spp_getpage %p\n", ptr);

	return ptr;
}

static p4d_t *fill_p4d(pgd_t *pgd, unsigned long vaddr)
{
	if (pgd_none(*pgd)) {
		p4d_t *p4d = (p4d_t *)spp_getpage();
		pgd_populate(&init_mm, pgd, p4d);
		if (p4d != p4d_offset(pgd, 0))
			printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
			       p4d, p4d_offset(pgd, 0));
	}
	return p4d_offset(pgd, vaddr);
}

static pud_t *fill_pud(p4d_t *p4d, unsigned long vaddr)
{
	if (p4d_none(*p4d)) {
		pud_t *pud = (pud_t *)spp_getpage();
		p4d_populate(&init_mm, p4d, pud);
		if (pud != pud_offset(p4d, 0))
			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
			       pud, pud_offset(p4d, 0));
	}
	return pud_offset(p4d, vaddr);
}

static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
{
	if (pud_none(*pud)) {
		pmd_t *pmd = (pmd_t *) spp_getpage();
		pud_populate(&init_mm, pud, pmd);
		if (pmd != pmd_offset(pud, 0))
			printk(KERN_ERR "PAGETABLE BUG #02! %p <-> %p\n",
			       pmd, pmd_offset(pud, 0));
	}
	return pmd_offset(pud, vaddr);
}

static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = (pte_t *) spp_getpage();
		pmd_populate_kernel(&init_mm, pmd, pte);
		if (pte != pte_offset_kernel(pmd, 0))
			printk(KERN_ERR "PAGETABLE BUG #03!\n");
	}
	return pte_offset_kernel(pmd, vaddr);
}

static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte)
{
	pmd_t *pmd = fill_pmd(pud, vaddr);
	pte_t *pte = fill_pte(pmd, vaddr);

	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte)
{
	p4d_t *p4d = p4d_page + p4d_index(vaddr);
	pud_t *pud = fill_pud(p4d, vaddr);

	__set_pte_vaddr(pud, vaddr, new_pte);
}

void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
{
	pud_t *pud = pud_page + pud_index(vaddr);

	__set_pte_vaddr(pud, vaddr, new_pte);
}

void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
	pgd_t *pgd;
	p4d_t *p4d_page;

	pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_ERR
			"PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}

	p4d_page = p4d_offset(pgd, 0);
	set_pte_vaddr_p4d(p4d_page, vaddr, pteval);
}
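/*
 * Example (hedged, not from this file): the fixmap code establishes a single
 * kernel mapping this way, roughly:
 *
 *	set_pte_vaddr(fix_to_virt(idx), pfn_pte(phys >> PAGE_SHIFT, flags));
 *
 * where idx, phys and flags come from native_set_fixmap(); only that one
 * address is flushed, PGE mappings included.
 */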

pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset_k(vaddr);
	p4d = fill_p4d(pgd, vaddr);
	pud = fill_pud(p4d, vaddr);
	return fill_pmd(pud, vaddr);
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
	pmd_t *pmd;

	pmd = populate_extra_pmd(vaddr);
	return fill_pte(pmd, vaddr);
}

/*
 * Create large page table mappings for a range of physical addresses.
 */
static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
					enum page_cache_mode cache)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pgprot_t prot;

	pgprot_val(prot) = pgprot_val(PAGE_KERNEL_LARGE) |
		pgprot_val(pgprot_4k_2_large(cachemode2pgprot(cache)));
	BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
	for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
		pgd = pgd_offset_k((unsigned long)__va(phys));
		if (pgd_none(*pgd)) {
			p4d = (p4d_t *) spp_getpage();
			set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		p4d = p4d_offset(pgd, (unsigned long)__va(phys));
		if (p4d_none(*p4d)) {
			pud = (pud_t *) spp_getpage();
			set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		pud = pud_offset(p4d, (unsigned long)__va(phys));
		if (pud_none(*pud)) {
			pmd = (pmd_t *) spp_getpage();
			set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		pmd = pmd_offset(pud, phys);
		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
	}
}

void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_WB);
}

void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_UC);
}
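/*
 * Illustration (hypothetical values): a caller that needs an early, 2MB
 * aligned uncached window, e.g. UV platform setup, would do something like
 *
 *	init_extra_mapping_uc(mmio_phys, PMD_SIZE);
 *
 * Both phys and size must be PMD aligned, as the BUG_ON above enforces;
 * the _wb variant is identical except for normal write-back caching.
 */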

/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
 *
 * phys_base holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _brk_end.  _brk_end
 * is rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
	unsigned long vaddr = __START_KERNEL_map;
	unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE;
	unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
	pmd_t *pmd = level2_kernel_pgt;

	/*
	 * Native path, max_pfn_mapped is not set yet.
	 * Xen has valid max_pfn_mapped set in
	 *	arch/x86/xen/mmu.c:xen_setup_kernel_pagetable().
	 */
	if (max_pfn_mapped)
		vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);

	for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
		if (pmd_none(*pmd))
			continue;
		if (vaddr < (unsigned long) _text || vaddr > end)
			set_pmd(pmd, __pmd(0));
	}
}

/*
 * Create PTE level page table mapping for physical addresses.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
	      pgprot_t prot)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;
	pte_t *pte;
	int i;

	pte = pte_page + pte_index(paddr);
	i = pte_index(paddr);

	for (; i < PTRS_PER_PTE; i++, paddr = paddr_next, pte++) {
		paddr_next = (paddr & PAGE_MASK) + PAGE_SIZE;
		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
					     E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
					     E820_TYPE_RESERVED_KERN))
				set_pte(pte, __pte(0));
			continue;
		}

		/*
		 * We will re-use the existing mapping.
		 * Xen for example has some special requirements, like mapping
		 * pagetable pages as RO. So assume whoever pre-set up
		 * these mappings knew what they were doing.
		 */
		if (!pte_none(*pte)) {
			if (!after_bootmem)
				pages++;
			continue;
		}

		if (0)
			pr_info("   pte=%p addr=%lx pte=%016lx\n", pte, paddr,
				pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL).pte);
		pages++;
		set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, prot));
		paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE;
	}

	update_page_count(PG_LEVEL_4K, pages);

	return paddr_last;
}

/*
 * Create PMD level page table mapping for physical addresses. The virtual
 * and physical address have to be aligned at this level.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask, pgprot_t prot)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;

	int i = pmd_index(paddr);

	for (; i < PTRS_PER_PMD; i++, paddr = paddr_next) {
		pmd_t *pmd = pmd_page + pmd_index(paddr);
		pte_t *pte;
		pgprot_t new_prot = prot;

		paddr_next = (paddr & PMD_MASK) + PMD_SIZE;
		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PMD_MASK, paddr_next,
					     E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & PMD_MASK, paddr_next,
					     E820_TYPE_RESERVED_KERN))
				set_pmd(pmd, __pmd(0));
			continue;
		}

		if (!pmd_none(*pmd)) {
			if (!pmd_large(*pmd)) {
				spin_lock(&init_mm.page_table_lock);
				pte = (pte_t *)pmd_page_vaddr(*pmd);
				paddr_last = phys_pte_init(pte, paddr,
							   paddr_end, prot);
				spin_unlock(&init_mm.page_table_lock);
				continue;
			}
			/*
			 * If we are ok with PG_LEVEL_2M mapping, then we will
			 * use the existing mapping,
			 *
			 * Otherwise, we will split the large page mapping but
			 * use the same existing protection bits except for
			 * large page, so that we don't violate Intel's TLB
			 * Application note (317080) which says, while changing
			 * the page sizes, new and old translations should
			 * not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_2M)) {
				if (!after_bootmem)
					pages++;
				paddr_last = paddr_next;
				continue;
			}
			new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
		}

		if (page_size_mask & (1<<PG_LEVEL_2M)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pte((pte_t *)pmd,
				pfn_pte((paddr & PMD_MASK) >> PAGE_SHIFT,
					__pgprot(pgprot_val(prot) | _PAGE_PSE)));
			spin_unlock(&init_mm.page_table_lock);
			paddr_last = paddr_next;
			continue;
		}

		pte = alloc_low_page();
		paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot);

		spin_lock(&init_mm.page_table_lock);
		pmd_populate_kernel(&init_mm, pmd, pte);
		spin_unlock(&init_mm.page_table_lock);
	}
	update_page_count(PG_LEVEL_2M, pages);
	return paddr_last;
}

/*
 * Create PUD level page table mapping for physical addresses. The virtual
 * and physical address do not have to be aligned at this level. KASLR can
 * randomize virtual addresses up to this level.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;
	unsigned long vaddr = (unsigned long)__va(paddr);
	int i = pud_index(vaddr);

	for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) {
		pud_t *pud;
		pmd_t *pmd;
		pgprot_t prot = PAGE_KERNEL;

		vaddr = (unsigned long)__va(paddr);
		pud = pud_page + pud_index(vaddr);
		paddr_next = (paddr & PUD_MASK) + PUD_SIZE;

		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PUD_MASK, paddr_next,
					     E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & PUD_MASK, paddr_next,
					     E820_TYPE_RESERVED_KERN))
				set_pud(pud, __pud(0));
			continue;
		}

		if (!pud_none(*pud)) {
			if (!pud_large(*pud)) {
				pmd = pmd_offset(pud, 0);
				paddr_last = phys_pmd_init(pmd, paddr,
							   paddr_end,
							   page_size_mask,
							   prot);
				__flush_tlb_all();
				continue;
			}
			/*
			 * If we are ok with PG_LEVEL_1G mapping, then we will
			 * use the existing mapping.
			 *
			 * Otherwise, we will split the gbpage mapping but use
			 * the same existing protection bits except for large
			 * page, so that we don't violate Intel's TLB
			 * Application note (317080) which says, while changing
			 * the page sizes, new and old translations should
			 * not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_1G)) {
				if (!after_bootmem)
					pages++;
				paddr_last = paddr_next;
				continue;
			}
			prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
		}

		if (page_size_mask & (1<<PG_LEVEL_1G)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pte((pte_t *)pud,
				pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
					PAGE_KERNEL_LARGE));
			spin_unlock(&init_mm.page_table_lock);
			paddr_last = paddr_next;
			continue;
		}

		pmd = alloc_low_page();
		paddr_last = phys_pmd_init(pmd, paddr, paddr_end,
					   page_size_mask, prot);

		spin_lock(&init_mm.page_table_lock);
		pud_populate(&init_mm, pud, pmd);
		spin_unlock(&init_mm.page_table_lock);
	}
	__flush_tlb_all();

	update_page_count(PG_LEVEL_1G, pages);

	return paddr_last;
}

static unsigned long __meminit
phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask)
{
	unsigned long paddr_next, paddr_last = paddr_end;
	unsigned long vaddr = (unsigned long)__va(paddr);
	int i = p4d_index(vaddr);

	if (!IS_ENABLED(CONFIG_X86_5LEVEL))
		return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end, page_size_mask);

	for (; i < PTRS_PER_P4D; i++, paddr = paddr_next) {
		p4d_t *p4d;
		pud_t *pud;

		vaddr = (unsigned long)__va(paddr);
		p4d = p4d_page + p4d_index(vaddr);
		paddr_next = (paddr & P4D_MASK) + P4D_SIZE;

		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & P4D_MASK, paddr_next,
					     E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & P4D_MASK, paddr_next,
					     E820_TYPE_RESERVED_KERN))
				set_p4d(p4d, __p4d(0));
			continue;
		}

		if (!p4d_none(*p4d)) {
			pud = pud_offset(p4d, 0);
			paddr_last = phys_pud_init(pud, paddr,
					paddr_end,
					page_size_mask);
			__flush_tlb_all();
			continue;
		}

		pud = alloc_low_page();
		paddr_last = phys_pud_init(pud, paddr, paddr_end,
					   page_size_mask);

		spin_lock(&init_mm.page_table_lock);
		p4d_populate(&init_mm, p4d, pud);
		spin_unlock(&init_mm.page_table_lock);
	}
	__flush_tlb_all();

	return paddr_last;
}

/*
 * Create page table mapping for the physical memory for specific physical
 * addresses. The virtual and physical addresses have to be aligned on PMD level
 * down. It returns the last physical address mapped.
 */
unsigned long __meminit
kernel_physical_mapping_init(unsigned long paddr_start,
			     unsigned long paddr_end,
			     unsigned long page_size_mask)
{
	bool pgd_changed = false;
	unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;

	paddr_last = paddr_end;
	vaddr = (unsigned long)__va(paddr_start);
	vaddr_end = (unsigned long)__va(paddr_end);
	vaddr_start = vaddr;

	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
		pgd_t *pgd = pgd_offset_k(vaddr);
		p4d_t *p4d;

		vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE;

		if (pgd_val(*pgd)) {
			p4d = (p4d_t *)pgd_page_vaddr(*pgd);
			paddr_last = phys_p4d_init(p4d, __pa(vaddr),
						   __pa(vaddr_end),
						   page_size_mask);
			continue;
		}

		p4d = alloc_low_page();
		paddr_last = phys_p4d_init(p4d, __pa(vaddr), __pa(vaddr_end),
					   page_size_mask);

		spin_lock(&init_mm.page_table_lock);
		if (IS_ENABLED(CONFIG_X86_5LEVEL))
			pgd_populate(&init_mm, pgd, p4d);
		else
			p4d_populate(&init_mm, p4d_offset(pgd, vaddr), (pud_t *) p4d);
		spin_unlock(&init_mm.page_table_lock);
		pgd_changed = true;
	}

	if (pgd_changed)
		sync_global_pgds(vaddr_start, vaddr_end - 1);

	__flush_tlb_all();

	return paddr_last;
}
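/*
 * Illustrative call (hypothetical values, not from this file): map the first
 * 4GB of RAM with 2MB pages where alignment allows it:
 *
 *	last = kernel_physical_mapping_init(0, 4UL << 30,
 *					    1 << PG_LEVEL_2M);
 *
 * In practice init_memory_mapping() computes page_size_mask from CPU
 * features (PSE/GBPAGES) and calls this in properly aligned chunks.
 */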

#ifndef CONFIG_NUMA
void __init initmem_init(void)
{
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
}
#endif

void __init paging_init(void)
{
	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

	/*
	 * clear the default setting with node 0
	 * note: don't use nodes_clear here, that is really clearing when
	 *	 numa support is not compiled in, and later node_set_state
	 *	 will not set it back.
	 */
	node_clear_state(0, N_MEMORY);
	if (N_MEMORY != N_NORMAL_MEMORY)
		node_clear_state(0, N_NORMAL_MEMORY);

	zone_sizes_init();
}

/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
 * updating.
 */
static void update_end_of_memory_vars(u64 start, u64 size)
{
	unsigned long end_pfn = PFN_UP(start + size);

	if (end_pfn > max_pfn) {
		max_pfn = end_pfn;
		max_low_pfn = end_pfn;
		high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
	}
}

int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap, bool want_memblock)
{
	int ret;

	ret = __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
	WARN_ON_ONCE(ret);

	/* update max_pfn, max_low_pfn and high_memory */
	update_end_of_memory_vars(start_pfn << PAGE_SHIFT,
				  nr_pages << PAGE_SHIFT);

	return ret;
}

int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
		bool want_memblock)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	init_memory_mapping(start, start + size);

	return add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
}

#define PAGE_INUSE 0xFD

static void __meminit free_pagetable(struct page *page, int order,
		struct vmem_altmap *altmap)
{
	unsigned long magic;
	unsigned int nr_pages = 1 << order;

	if (altmap) {
		vmem_altmap_free(altmap, nr_pages);
		return;
	}

	/* bootmem page has reserved flag */
	if (PageReserved(page)) {
		__ClearPageReserved(page);

		magic = (unsigned long)page->freelist;
		if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
			while (nr_pages--)
				put_page_bootmem(page++);
		} else
			while (nr_pages--)
				free_reserved_page(page++);
	} else
		free_pages((unsigned long)page_address(page), order);
}

static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd,
		struct vmem_altmap *altmap)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	/* free a pte table */
	free_pagetable(pmd_page(*pmd), 0, altmap);
	spin_lock(&init_mm.page_table_lock);
	pmd_clear(pmd);
	spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud,
		struct vmem_altmap *altmap)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	/* free a pmd table */
	free_pagetable(pud_page(*pud), 0, altmap);
	spin_lock(&init_mm.page_table_lock);
	pud_clear(pud);
	spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d,
		struct vmem_altmap *altmap)
{
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		pud = pud_start + i;
		if (!pud_none(*pud))
			return;
	}

	/* free a pud table */
	free_pagetable(p4d_page(*p4d), 0, altmap);
	spin_lock(&init_mm.page_table_lock);
	p4d_clear(p4d);
	spin_unlock(&init_mm.page_table_lock);
}

static void __meminit
remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
		 struct vmem_altmap *altmap, bool direct)
{
	unsigned long next, pages = 0;
	pte_t *pte;
	void *page_addr;
	phys_addr_t phys_addr;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		/*
		 * We mapped [0,1G) memory as identity mapping when
		 * initializing, in arch/x86/kernel/head_64.S. These
		 * pagetables cannot be removed.
		 */
		phys_addr = pte_val(*pte) + (addr & PAGE_MASK);
		if (phys_addr < (phys_addr_t)0x40000000)
			return;

		if (PAGE_ALIGNED(addr) && PAGE_ALIGNED(next)) {
			/*
			 * Do not free direct mapping pages since they were
			 * freed when offlining, or simply not in use.
			 */
			if (!direct)
				free_pagetable(pte_page(*pte), 0, altmap);

			spin_lock(&init_mm.page_table_lock);
			pte_clear(&init_mm, addr, pte);
			spin_unlock(&init_mm.page_table_lock);

			/* For non-direct mapping, pages means nothing. */
			pages++;
		} else {
			/*
			 * If we are here, we are freeing vmemmap pages since
			 * direct mapped memory ranges to be freed are aligned.
			 *
			 * If we are not removing the whole page, it means
			 * other page structs in this page are being used and
			 * we cannot remove them. So fill the unused page_structs
			 * with 0xFD, and remove the page when it is wholly
			 * filled with 0xFD.
			 */
			memset((void *)addr, PAGE_INUSE, next - addr);

			page_addr = page_address(pte_page(*pte));
			if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
				free_pagetable(pte_page(*pte), 0, altmap);

				spin_lock(&init_mm.page_table_lock);
				pte_clear(&init_mm, addr, pte);
				spin_unlock(&init_mm.page_table_lock);
			}
		}
	}

	/* Call free_pte_table() in remove_pmd_table(). */
	flush_tlb_all();
	if (direct)
		update_page_count(PG_LEVEL_4K, -pages);
}

static void __meminit
remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
		 bool direct, struct vmem_altmap *altmap)
{
	unsigned long next, pages = 0;
	pte_t *pte_base;
	pmd_t *pmd;
	void *page_addr;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_large(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE)) {
				if (!direct)
					free_pagetable(pmd_page(*pmd),
						       get_order(PMD_SIZE),
						       altmap);

				spin_lock(&init_mm.page_table_lock);
				pmd_clear(pmd);
				spin_unlock(&init_mm.page_table_lock);
				pages++;
			} else {
				/* If here, we are freeing vmemmap pages. */
				memset((void *)addr, PAGE_INUSE, next - addr);

				page_addr = page_address(pmd_page(*pmd));
				if (!memchr_inv(page_addr, PAGE_INUSE,
						PMD_SIZE)) {
					free_pagetable(pmd_page(*pmd),
						       get_order(PMD_SIZE),
						       altmap);

					spin_lock(&init_mm.page_table_lock);
					pmd_clear(pmd);
					spin_unlock(&init_mm.page_table_lock);
				}
			}

			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next, altmap, direct);
		free_pte_table(pte_base, pmd, altmap);
	}

	/* Call free_pmd_table() in remove_pud_table(). */
	if (direct)
		update_page_count(PG_LEVEL_2M, -pages);
}

static void __meminit
remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
		 struct vmem_altmap *altmap, bool direct)
{
	unsigned long next, pages = 0;
	pmd_t *pmd_base;
	pud_t *pud;
	void *page_addr;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_large(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE)) {
				if (!direct)
					free_pagetable(pud_page(*pud),
						       get_order(PUD_SIZE),
						       altmap);

				spin_lock(&init_mm.page_table_lock);
				pud_clear(pud);
				spin_unlock(&init_mm.page_table_lock);
				pages++;
			} else {
				/* If here, we are freeing vmemmap pages. */
				memset((void *)addr, PAGE_INUSE, next - addr);

				page_addr = page_address(pud_page(*pud));
				if (!memchr_inv(page_addr, PAGE_INUSE,
						PUD_SIZE)) {
					free_pagetable(pud_page(*pud),
						       get_order(PUD_SIZE),
						       altmap);

					spin_lock(&init_mm.page_table_lock);
					pud_clear(pud);
					spin_unlock(&init_mm.page_table_lock);
				}
			}

			continue;
		}

		pmd_base = pmd_offset(pud, 0);
		remove_pmd_table(pmd_base, addr, next, direct, altmap);
		free_pmd_table(pmd_base, pud, altmap);
	}

	if (direct)
		update_page_count(PG_LEVEL_1G, -pages);
}

static void __meminit
remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
		 struct vmem_altmap *altmap, bool direct)
{
	unsigned long next, pages = 0;
	pud_t *pud_base;
	p4d_t *p4d;

	p4d = p4d_start + p4d_index(addr);
	for (; addr < end; addr = next, p4d++) {
		next = p4d_addr_end(addr, end);

		if (!p4d_present(*p4d))
			continue;

		BUILD_BUG_ON(p4d_large(*p4d));

		pud_base = pud_offset(p4d, 0);
		remove_pud_table(pud_base, addr, next, altmap, direct);
		/*
		 * For 4-level page tables we do not want to free PUDs, but in the
		 * 5-level case we should free them. This code will have to change
		 * to adapt for boot-time switching between 4 and 5 level page tables.
		 */
		if (CONFIG_PGTABLE_LEVELS == 5)
			free_pud_table(pud_base, p4d, altmap);
	}

	if (direct)
		update_page_count(PG_LEVEL_512G, -pages);
}

/* start and end are both virtual addresses. */
static void __meminit
remove_pagetable(unsigned long start, unsigned long end, bool direct,
		struct vmem_altmap *altmap)
{
	unsigned long next;
	unsigned long addr;
	pgd_t *pgd;
	p4d_t *p4d;

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		p4d = p4d_offset(pgd, 0);
		remove_p4d_table(p4d, addr, next, altmap, direct);
	}

	flush_tlb_all();
}

void __ref vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap)
{
	remove_pagetable(start, end, false, altmap);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static void __meminit
kernel_physical_mapping_remove(unsigned long start, unsigned long end)
{
	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	remove_pagetable(start, end, true, NULL);
}

int __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct page *page = pfn_to_page(start_pfn);
	struct zone *zone;
	int ret;

	/* With altmap the first mapped page is offset from @start */
	if (altmap)
		page += vmem_altmap_offset(altmap);
	zone = page_zone(page);
	ret = __remove_pages(zone, start_pfn, nr_pages, altmap);
	WARN_ON_ONCE(ret);
	kernel_physical_mapping_remove(start, start + size);

	return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */

static struct kcore_list kcore_vsyscall;

static void __init register_page_bootmem_info(void)
{
#ifdef CONFIG_NUMA
	int i;

	for_each_online_node(i)
		register_page_bootmem_info_node(NODE_DATA(i));
#endif
}

void __init mem_init(void)
{
	pci_iommu_alloc();

	/* clear_bss() already cleared the empty_zero_page */

	/* this will put all memory onto the freelists */
	free_all_bootmem();
	after_bootmem = 1;

	/*
	 * Must be done after boot memory is put on freelist, because here we
	 * might set fields in deferred struct pages that have not yet been
	 * initialized, and free_all_bootmem() initializes all the reserved
	 * deferred pages for us.
	 */
	register_page_bootmem_info();

	/* Register memory areas for /proc/kcore */
	if (get_gate_vma(&init_mm))
		kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER);

	mem_init_print_info(NULL);
}

int kernel_set_to_readonly;

void set_kernel_text_rw(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = PFN_ALIGN(__stop___ex_table);

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read write\n",
		 start, end);

	/*
	 * Make the kernel identity mapping for text RW. Kernel text
	 * mapping will always be RO. Refer to the comment in
	 * static_protections() in pageattr.c
	 */
	set_memory_rw(start, (end - start) >> PAGE_SHIFT);
}

void set_kernel_text_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = PFN_ALIGN(__stop___ex_table);

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read only\n",
		 start, end);

	/*
	 * Set the kernel identity mapping for text RO.
	 */
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long rodata_start = PFN_ALIGN(__start_rodata);
	unsigned long end = (unsigned long) &__end_rodata_hpage_align;
	unsigned long text_end = PFN_ALIGN(&__stop___ex_table);
	unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
	unsigned long all_end;

	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
	       (end - start) >> 10);
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);

	kernel_set_to_readonly = 1;

	/*
	 * The rodata/data/bss/brk section (but not the kernel text!)
	 * should also be not-executable.
	 *
	 * We align all_end to PMD_SIZE because the existing mapping
	 * is a full PMD. If we would align _brk_end to PAGE_SIZE we
	 * split the PMD and the remainder between _brk_end and the end
	 * of the PMD will remain mapped executable.
	 *
	 * Any PMD which was setup after the one which covers _brk_end
	 * has been zapped already via cleanup_highmap().
	 */
	all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
	set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
	set_memory_rw(start, (end-start) >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: again\n");
	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif

	free_init_pages("unused kernel",
			(unsigned long) __va(__pa_symbol(text_end)),
			(unsigned long) __va(__pa_symbol(rodata_start)));
	free_init_pages("unused kernel",
			(unsigned long) __va(__pa_symbol(rodata_end)),
			(unsigned long) __va(__pa_symbol(_sdata)));

	debug_checkwx();
}

int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return 0;

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_large(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}
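/*
 * Usage note (illustrative, not from this file): /proc/kcore uses this to
 * check that a kernel virtual address is backed before reading it, e.g.
 *
 *	if (!kern_addr_valid(addr))
 *		return -EFAULT;
 *
 * It walks the live page tables, so huge PUD/PMD mappings are accepted via
 * the pud_large()/pmd_large() checks above.
 */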

static unsigned long probe_memory_block_size(void)
{
	unsigned long bz = MIN_MEMORY_BLOCK_SIZE;

	/* if system is UV or has 64GB of RAM or more, use large blocks */
	if (is_uv_system() || ((max_pfn << PAGE_SHIFT) >= (64UL << 30)))
		bz = 2UL << 30; /* 2GB */

	pr_info("x86/mm: Memory block size: %ldMB\n", bz >> 20);

	return bz;
}

static unsigned long memory_block_size_probed;
unsigned long memory_block_size_bytes(void)
{
	if (!memory_block_size_probed)
		memory_block_size_probed = probe_memory_block_size();

	return memory_block_size_probed;
}
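/*
 * Illustration (not part of the original file): the value returned by
 * memory_block_size_bytes() is what the memory-hotplug core uses as the
 * granularity of a hotpluggable memory block, e.g.
 *
 *	unsigned long bz = memory_block_size_bytes();
 *	pr_debug("hotplug block: %lu MiB\n", bz >> 20);
 *
 * so on a UV system or a machine with 64GB of RAM or more each block spans
 * 2GB, otherwise MIN_MEMORY_BLOCK_SIZE.
 */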

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;

static int __meminit vmemmap_populate_hugepages(unsigned long start,
		unsigned long end, int node, struct vmem_altmap *altmap)
{
	unsigned long addr;
	unsigned long next;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	for (addr = start; addr < end; addr = next) {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		p4d = vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;

		pud = vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p;

			if (altmap)
				p = altmap_alloc_block_buf(PMD_SIZE, altmap);
			else
				p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (p) {
				pte_t entry;

				entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
						PAGE_KERNEL_LARGE);
				set_pmd(pmd, __pmd(pte_val(entry)));

				/* check to see if we have contiguous blocks */
				if (p_end != p || node_start != node) {
					if (p_start)
						pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
						       addr_start, addr_end-1, p_start, p_end-1, node_start);
					addr_start = addr;
					node_start = node;
					p_start = p;
				}

				addr_end = addr + PMD_SIZE;
				p_end = p + PMD_SIZE;
				continue;
			} else if (altmap)
				return -ENOMEM; /* no fallback */
		} else if (pmd_large(*pmd)) {
			vmemmap_verify((pte_t *)pmd, node, addr, next);
			continue;
		}
		if (vmemmap_populate_basepages(addr, next, node))
			return -ENOMEM;
	}
	return 0;
}

int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap)
{
	int err;

	if (boot_cpu_has(X86_FEATURE_PSE))
		err = vmemmap_populate_hugepages(start, end, node, altmap);
	else if (altmap) {
		pr_err_once("%s: no cpu support for altmap allocations\n",
				__func__);
		err = -ENOMEM;
	} else
		err = vmemmap_populate_basepages(start, end, node);
	if (!err)
		sync_global_pgds(start, end - 1);
	return err;
}
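/*
 * Note (illustrative): with a struct vmem_altmap the memmap for hot-added
 * device memory is carved out of the device's own range, e.g.
 *
 *	vmemmap_populate(start, end, nid, altmap);
 *
 * Base-page population is only used as a fallback when no altmap is given;
 * with an altmap and no PSE support it fails instead, as the pr_err_once()
 * above explains.
 */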

#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE)
void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long nr_pages)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + nr_pages);
	unsigned long next;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	unsigned int nr_pmd_pages;
	struct page *page;

	for (; addr < end; addr = next) {
		pte_t *pte = NULL;

		pgd = pgd_offset_k(addr);
		if (pgd_none(*pgd)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, pgd_page(*pgd), MIX_SECTION_INFO);

		p4d = p4d_offset(pgd, addr);
		if (p4d_none(*p4d)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, p4d_page(*p4d), MIX_SECTION_INFO);

		pud = pud_offset(p4d, addr);
		if (pud_none(*pud)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);

		if (!boot_cpu_has(X86_FEATURE_PSE)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd))
				continue;
			get_page_bootmem(section_nr, pmd_page(*pmd),
					 MIX_SECTION_INFO);

			pte = pte_offset_kernel(pmd, addr);
			if (pte_none(*pte))
				continue;
			get_page_bootmem(section_nr, pte_page(*pte),
					 SECTION_INFO);
		} else {
			next = pmd_addr_end(addr, end);

			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd))
				continue;

			nr_pmd_pages = 1 << get_order(PMD_SIZE);
			page = pmd_page(*pmd);
			while (nr_pmd_pages--)
				get_page_bootmem(section_nr, page++,
						 SECTION_INFO);
		}
	}
}
#endif

void __meminit vmemmap_populate_print_last(void)
{
	if (p_start) {
		pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
			addr_start, addr_end-1, p_start, p_end-1, node_start);
		p_start = NULL;
		p_end = NULL;
		node_start = 0;
	}
}
#endif