/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@ucw.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/memremap.h>
#include <linux/nmi.h>
#include <linux/gfp.h>
#include <linux/kcore.h>

#include <asm/processor.h>
#include <asm/bios_ebda.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820/api.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/cacheflush.h>
#include <asm/init.h>
#include <asm/uv/uv.h>
#include <asm/setup.h>

#include "mm_internal.h"

#include "ident_map.c"

/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */

pteval_t __supported_pte_mask __read_mostly = ~0;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

int force_personality32;

/*
 * noexec32=on|off
 * Control the non-executable heap for 32-bit processes.
 * To control the stack too, use noexec=off.
 *
 * on	PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 * off	PROT_READ implies PROT_EXEC
 */
static int __init nonx32_setup(char *str)
{
	if (!strcmp(str, "on"))
		force_personality32 &= ~READ_IMPLIES_EXEC;
	else if (!strcmp(str, "off"))
		force_personality32 |= READ_IMPLIES_EXEC;
	return 1;
}
__setup("noexec32=", nonx32_setup);

/*
 * When memory is added, make sure all the processes' MMs have
 * suitable PGD entries in the local PGD level page.
 */
void sync_global_pgds(unsigned long start, unsigned long end)
{
	unsigned long address;

	for (address = start; address <= end; address += PGDIR_SIZE) {
		pgd_t *pgd_ref = pgd_offset_k(address);
		const p4d_t *p4d_ref;
		struct page *page;

		/*
		 * With folded p4d, pgd_none() is always false, so we need to
		 * handle synchronization at the p4d level.
		 */
		BUILD_BUG_ON(pgd_none(*pgd_ref));
		p4d_ref = p4d_offset(pgd_ref, address);

		if (p4d_none(*p4d_ref))
			continue;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			p4d_t *p4d;
			spinlock_t *pgt_lock;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			p4d = p4d_offset(pgd, address);
			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
			spin_lock(pgt_lock);

			if (!p4d_none(*p4d_ref) && !p4d_none(*p4d))
				BUG_ON(p4d_page_vaddr(*p4d)
				       != p4d_page_vaddr(*p4d_ref));

			if (p4d_none(*p4d))
				set_p4d(p4d, *p4d_ref);

			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}

/*
 * NOTE: This function is marked __ref because it calls __init function
 * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.
 */
static __ref void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);

	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
		panic("set_pte_phys: cannot allocate page data %s\n",
			after_bootmem ? "after bootmem" : "");
	}

	pr_debug("spp_getpage %p\n", ptr);

	return ptr;
}

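/*
 * The fill_*() helpers below allocate the next-lower page table via
 * spp_getpage() if the entry is empty, and return the entry within
 * that table for @vaddr.
 */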
static p4d_t *fill_p4d(pgd_t *pgd, unsigned long vaddr)
{
	if (pgd_none(*pgd)) {
		p4d_t *p4d = (p4d_t *)spp_getpage();
		pgd_populate(&init_mm, pgd, p4d);
		if (p4d != p4d_offset(pgd, 0))
			printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
			       p4d, p4d_offset(pgd, 0));
	}
	return p4d_offset(pgd, vaddr);
}

static pud_t *fill_pud(p4d_t *p4d, unsigned long vaddr)
{
	if (p4d_none(*p4d)) {
		pud_t *pud = (pud_t *)spp_getpage();
		p4d_populate(&init_mm, p4d, pud);
		if (pud != pud_offset(p4d, 0))
			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
			       pud, pud_offset(p4d, 0));
	}
	return pud_offset(p4d, vaddr);
}

static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
{
	if (pud_none(*pud)) {
		pmd_t *pmd = (pmd_t *) spp_getpage();
		pud_populate(&init_mm, pud, pmd);
		if (pmd != pmd_offset(pud, 0))
			printk(KERN_ERR "PAGETABLE BUG #02! %p <-> %p\n",
			       pmd, pmd_offset(pud, 0));
	}
	return pmd_offset(pud, vaddr);
}

static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = (pte_t *) spp_getpage();
		pmd_populate_kernel(&init_mm, pmd, pte);
		if (pte != pte_offset_kernel(pmd, 0))
			printk(KERN_ERR "PAGETABLE BUG #03!\n");
	}
	return pte_offset_kernel(pmd, vaddr);
}

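/*
 * Install @new_pte for @vaddr underneath @pud, filling the intermediate
 * pmd/pte levels on demand, then flush the one affected TLB entry.
 */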
static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte)
{
	pmd_t *pmd = fill_pmd(pud, vaddr);
	pte_t *pte = fill_pte(pmd, vaddr);

	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte)
{
	p4d_t *p4d = p4d_page + p4d_index(vaddr);
	pud_t *pud = fill_pud(p4d, vaddr);

	__set_pte_vaddr(pud, vaddr, new_pte);
}

void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
{
	pud_t *pud = pud_page + pud_index(vaddr);

	__set_pte_vaddr(pud, vaddr, new_pte);
}

void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
	pgd_t *pgd;
	p4d_t *p4d_page;

	pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_ERR
			"PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}

	p4d_page = p4d_offset(pgd, 0);
	set_pte_vaddr_p4d(p4d_page, vaddr, pteval);
}

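/*
 * Populate all intermediate page-table levels for @vaddr in the kernel
 * page tables and hand back the pmd (or pte) slot; used by early boot code.
 */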
pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset_k(vaddr);
	p4d = fill_p4d(pgd, vaddr);
	pud = fill_pud(p4d, vaddr);
	return fill_pmd(pud, vaddr);
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
	pmd_t *pmd;

	pmd = populate_extra_pmd(vaddr);
	return fill_pte(pmd, vaddr);
}

/*
 * Create large page table mappings for a range of physical addresses.
 */
static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
					enum page_cache_mode cache)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pgprot_t prot;

	pgprot_val(prot) = pgprot_val(PAGE_KERNEL_LARGE) |
		pgprot_val(pgprot_4k_2_large(cachemode2pgprot(cache)));
	BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
	for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
		pgd = pgd_offset_k((unsigned long)__va(phys));
		if (pgd_none(*pgd)) {
			p4d = (p4d_t *) spp_getpage();
			set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		p4d = p4d_offset(pgd, (unsigned long)__va(phys));
		if (p4d_none(*p4d)) {
			pud = (pud_t *) spp_getpage();
			set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		pud = pud_offset(p4d, (unsigned long)__va(phys));
		if (pud_none(*pud)) {
			pmd = (pmd_t *) spp_getpage();
			set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		pmd = pmd_offset(pud, phys);
		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
	}
}

void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_WB);
}

void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_UC);
}

/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
 *
 * phys_base holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _brk_end.  _brk_end
 * is rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
	unsigned long vaddr = __START_KERNEL_map;
	unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE;
	unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
	pmd_t *pmd = level2_kernel_pgt;

	/*
	 * Native path, max_pfn_mapped is not set yet.
	 * Xen has valid max_pfn_mapped set in
	 *	arch/x86/xen/mmu.c:xen_setup_kernel_pagetable().
	 */
	if (max_pfn_mapped)
		vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);

	for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
		if (pmd_none(*pmd))
			continue;
		if (vaddr < (unsigned long) _text || vaddr > end)
			set_pmd(pmd, __pmd(0));
	}
}

/*
 * Create PTE level page table mapping for physical addresses.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
	      pgprot_t prot)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;
	pte_t *pte;
	int i;

	pte = pte_page + pte_index(paddr);
	i = pte_index(paddr);

	for (; i < PTRS_PER_PTE; i++, paddr = paddr_next, pte++) {
		paddr_next = (paddr & PAGE_MASK) + PAGE_SIZE;
		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
					     E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
					     E820_TYPE_RESERVED_KERN))
				set_pte(pte, __pte(0));
			continue;
		}

		/*
		 * We will re-use the existing mapping.
		 * Xen for example has some special requirements, like mapping
		 * pagetable pages as RO. So assume that whoever pre-set up
		 * these mappings knows what they are doing.
		 */
		if (!pte_none(*pte)) {
			if (!after_bootmem)
				pages++;
			continue;
		}

		if (0)
			pr_info("   pte=%p addr=%lx pte=%016lx\n", pte, paddr,
				pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL).pte);
		pages++;
		set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, prot));
		paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE;
	}

	update_page_count(PG_LEVEL_4K, pages);

	return paddr_last;
}

/*
 * Create PMD level page table mapping for physical addresses. The virtual
 * and physical address have to be aligned at this level.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask, pgprot_t prot)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;

	int i = pmd_index(paddr);

	for (; i < PTRS_PER_PMD; i++, paddr = paddr_next) {
		pmd_t *pmd = pmd_page + pmd_index(paddr);
		pte_t *pte;
		pgprot_t new_prot = prot;

		paddr_next = (paddr & PMD_MASK) + PMD_SIZE;
		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PMD_MASK, paddr_next,
					     E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & PMD_MASK, paddr_next,
					     E820_TYPE_RESERVED_KERN))
				set_pmd(pmd, __pmd(0));
			continue;
		}

		if (!pmd_none(*pmd)) {
			if (!pmd_large(*pmd)) {
				spin_lock(&init_mm.page_table_lock);
				pte = (pte_t *)pmd_page_vaddr(*pmd);
				paddr_last = phys_pte_init(pte, paddr,
							   paddr_end, prot);
				spin_unlock(&init_mm.page_table_lock);
				continue;
			}
			/*
			 * If we are ok with PG_LEVEL_2M mapping, then we will
			 * use the existing mapping.
			 *
			 * Otherwise, we will split the large page mapping but
			 * use the same existing protection bits except for
			 * large page, so that we don't violate Intel's TLB
			 * Application note (317080) which says, while changing
			 * the page sizes, new and old translations should
			 * not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_2M)) {
				if (!after_bootmem)
					pages++;
				paddr_last = paddr_next;
				continue;
			}
			new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
		}

		if (page_size_mask & (1<<PG_LEVEL_2M)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pte((pte_t *)pmd,
				pfn_pte((paddr & PMD_MASK) >> PAGE_SHIFT,
					__pgprot(pgprot_val(prot) | _PAGE_PSE)));
			spin_unlock(&init_mm.page_table_lock);
			paddr_last = paddr_next;
			continue;
		}

		pte = alloc_low_page();
		paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot);

		spin_lock(&init_mm.page_table_lock);
		pmd_populate_kernel(&init_mm, pmd, pte);
		spin_unlock(&init_mm.page_table_lock);
	}
	update_page_count(PG_LEVEL_2M, pages);
	return paddr_last;
}

/*
 * Create PUD level page table mapping for physical addresses. The virtual
 * and physical address do not have to be aligned at this level. KASLR can
 * randomize virtual addresses up to this level.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;
	unsigned long vaddr = (unsigned long)__va(paddr);
	int i = pud_index(vaddr);

	for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) {
		pud_t *pud;
		pmd_t *pmd;
		pgprot_t prot = PAGE_KERNEL;

		vaddr = (unsigned long)__va(paddr);
		pud = pud_page + pud_index(vaddr);
		paddr_next = (paddr & PUD_MASK) + PUD_SIZE;

		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PUD_MASK, paddr_next,
					     E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & PUD_MASK, paddr_next,
					     E820_TYPE_RESERVED_KERN))
				set_pud(pud, __pud(0));
			continue;
		}

		if (!pud_none(*pud)) {
			if (!pud_large(*pud)) {
				pmd = pmd_offset(pud, 0);
				paddr_last = phys_pmd_init(pmd, paddr,
							   paddr_end,
							   page_size_mask,
							   prot);
				__flush_tlb_all();
				continue;
			}
			/*
			 * If we are ok with PG_LEVEL_1G mapping, then we will
			 * use the existing mapping.
			 *
			 * Otherwise, we will split the gbpage mapping but use
			 * the same existing protection bits except for large
			 * page, so that we don't violate Intel's TLB
			 * Application note (317080) which says, while changing
			 * the page sizes, new and old translations should
			 * not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_1G)) {
				if (!after_bootmem)
					pages++;
				paddr_last = paddr_next;
				continue;
			}
			prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
		}

		if (page_size_mask & (1<<PG_LEVEL_1G)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pte((pte_t *)pud,
				pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
					PAGE_KERNEL_LARGE));
			spin_unlock(&init_mm.page_table_lock);
			paddr_last = paddr_next;
			continue;
		}

		pmd = alloc_low_page();
		paddr_last = phys_pmd_init(pmd, paddr, paddr_end,
					   page_size_mask, prot);

		spin_lock(&init_mm.page_table_lock);
		pud_populate(&init_mm, pud, pmd);
		spin_unlock(&init_mm.page_table_lock);
	}
	__flush_tlb_all();

	update_page_count(PG_LEVEL_1G, pages);

	return paddr_last;
}

/*
 * Create page table mapping for the physical memory for specific physical
 * addresses. The virtual and physical addresses have to be aligned on PMD level
 * down. It returns the last physical address mapped.
 */
unsigned long __meminit
kernel_physical_mapping_init(unsigned long paddr_start,
			     unsigned long paddr_end,
			     unsigned long page_size_mask)
{
	bool pgd_changed = false;
	unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;

	paddr_last = paddr_end;
	vaddr = (unsigned long)__va(paddr_start);
	vaddr_end = (unsigned long)__va(paddr_end);
	vaddr_start = vaddr;

	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
		pgd_t *pgd = pgd_offset_k(vaddr);
		p4d_t *p4d;
		pud_t *pud;

		vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE;

		BUILD_BUG_ON(pgd_none(*pgd));
		p4d = p4d_offset(pgd, vaddr);
		if (p4d_val(*p4d)) {
			pud = (pud_t *)p4d_page_vaddr(*p4d);
			paddr_last = phys_pud_init(pud, __pa(vaddr),
						   __pa(vaddr_end),
						   page_size_mask);
			continue;
		}

		pud = alloc_low_page();
		paddr_last = phys_pud_init(pud, __pa(vaddr), __pa(vaddr_end),
					   page_size_mask);

		spin_lock(&init_mm.page_table_lock);
		p4d_populate(&init_mm, p4d, pud);
		spin_unlock(&init_mm.page_table_lock);
		pgd_changed = true;
	}

	if (pgd_changed)
634
		sync_global_pgds(vaddr_start, vaddr_end - 1);

	__flush_tlb_all();

	return paddr_last;
}

#ifndef CONFIG_NUMA
void __init initmem_init(void)
{
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
}
#endif

void __init paging_init(void)
{
	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

	/*
	 * clear the default setting with node 0
	 * note: don't use nodes_clear here, that is really clearing when
	 *	 numa support is not compiled in, and later node_set_state
	 *	 will not set it back.
	 */
	node_clear_state(0, N_MEMORY);
	if (N_MEMORY != N_NORMAL_MEMORY)
		node_clear_state(0, N_NORMAL_MEMORY);

	zone_sizes_init();
}

/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
 * updating.
 */
static void  update_end_of_memory_vars(u64 start, u64 size)
{
	unsigned long end_pfn = PFN_UP(start + size);

	if (end_pfn > max_pfn) {
		max_pfn = end_pfn;
		max_low_pfn = end_pfn;
		high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
	}
}

/*
 * Memory is always added to the NORMAL zone. This means you will never get
 * additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct zone *zone = pgdat->node_zones +
		zone_for_memory(nid, start, size, ZONE_NORMAL, for_device);
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	init_memory_mapping(start, start + size);

	ret = __add_pages(nid, zone, start_pfn, nr_pages);
	WARN_ON_ONCE(ret);

	/* update max_pfn, max_low_pfn and high_memory */
	update_end_of_memory_vars(start, size);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#define PAGE_INUSE 0xFD

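/*
 * Free a page-table page back to whichever allocator it came from:
 * the altmap, the bootmem allocator, or the page allocator.
 */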
static void __meminit free_pagetable(struct page *page, int order)
{
	unsigned long magic;
	unsigned int nr_pages = 1 << order;
	struct vmem_altmap *altmap = to_vmem_altmap((unsigned long) page);

	if (altmap) {
		vmem_altmap_free(altmap, nr_pages);
		return;
	}

	/* bootmem page has reserved flag */
	if (PageReserved(page)) {
		__ClearPageReserved(page);

		magic = (unsigned long)page->freelist;
		if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
			while (nr_pages--)
				put_page_bootmem(page++);
		} else
			while (nr_pages--)
				free_reserved_page(page++);
	} else
		free_pages((unsigned long)page_address(page), order);
}

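/*
 * The free_*_table() helpers below free a lower-level table only if
 * every entry in it is already empty, then clear the entry that
 * pointed to it.
 */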
static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	/* free a pte table */
	free_pagetable(pmd_page(*pmd), 0);
	spin_lock(&init_mm.page_table_lock);
	pmd_clear(pmd);
	spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	/* free a pmd table */
	free_pagetable(pud_page(*pud), 0);
	spin_lock(&init_mm.page_table_lock);
	pud_clear(pud);
	spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
{
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		pud = pud_start + i;
		if (!pud_none(*pud))
			return;
	}

	/* free a pud table */
	free_pagetable(p4d_page(*p4d), 0);
	spin_lock(&init_mm.page_table_lock);
	p4d_clear(p4d);
	spin_unlock(&init_mm.page_table_lock);
}

static void __meminit
remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
		 bool direct)
{
	unsigned long next, pages = 0;
	pte_t *pte;
	void *page_addr;
	phys_addr_t phys_addr;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		/*
		 * We mapped [0,1G) memory as identity mapping when
		 * initializing, in arch/x86/kernel/head_64.S. These
		 * pagetables cannot be removed.
		 */
		phys_addr = pte_val(*pte) + (addr & PAGE_MASK);
		if (phys_addr < (phys_addr_t)0x40000000)
			return;

		if (PAGE_ALIGNED(addr) && PAGE_ALIGNED(next)) {
			/*
			 * Do not free direct mapping pages since they were
			 * freed when offlining, or simply not in use.
			 */
			if (!direct)
				free_pagetable(pte_page(*pte), 0);

			spin_lock(&init_mm.page_table_lock);
			pte_clear(&init_mm, addr, pte);
			spin_unlock(&init_mm.page_table_lock);

			/* For non-direct mapping, pages means nothing. */
			pages++;
		} else {
			/*
			 * If we are here, we are freeing vmemmap pages since
			 * direct mapped memory ranges to be freed are aligned.
			 *
			 * If we are not removing the whole page, it means
			 * other page structs in this page are being used and
			 * we cannot remove them. So fill the unused page_structs
			 * with 0xFD, and remove the page when it is wholly
			 * filled with 0xFD.
			 */
			memset((void *)addr, PAGE_INUSE, next - addr);

			page_addr = page_address(pte_page(*pte));
			if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
				free_pagetable(pte_page(*pte), 0);

				spin_lock(&init_mm.page_table_lock);
				pte_clear(&init_mm, addr, pte);
				spin_unlock(&init_mm.page_table_lock);
			}
		}
	}

	/* Call free_pte_table() in remove_pmd_table(). */
	flush_tlb_all();
	if (direct)
		update_page_count(PG_LEVEL_4K, -pages);
}

static void __meminit
remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
		 bool direct)
{
	unsigned long next, pages = 0;
	pte_t *pte_base;
	pmd_t *pmd;
	void *page_addr;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_large(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE)) {
				if (!direct)
					free_pagetable(pmd_page(*pmd),
						       get_order(PMD_SIZE));

				spin_lock(&init_mm.page_table_lock);
				pmd_clear(pmd);
				spin_unlock(&init_mm.page_table_lock);
				pages++;
			} else {
				/* If here, we are freeing vmemmap pages. */
				memset((void *)addr, PAGE_INUSE, next - addr);

				page_addr = page_address(pmd_page(*pmd));
				if (!memchr_inv(page_addr, PAGE_INUSE,
						PMD_SIZE)) {
					free_pagetable(pmd_page(*pmd),
						       get_order(PMD_SIZE));

					spin_lock(&init_mm.page_table_lock);
					pmd_clear(pmd);
					spin_unlock(&init_mm.page_table_lock);
				}
			}

			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next, direct);
		free_pte_table(pte_base, pmd);
	}

	/* Call free_pmd_table() in remove_pud_table(). */
	if (direct)
		update_page_count(PG_LEVEL_2M, -pages);
}

static void __meminit
remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
		 bool direct)
{
	unsigned long next, pages = 0;
	pmd_t *pmd_base;
	pud_t *pud;
	void *page_addr;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_large(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE)) {
				if (!direct)
					free_pagetable(pud_page(*pud),
						       get_order(PUD_SIZE));

				spin_lock(&init_mm.page_table_lock);
				pud_clear(pud);
				spin_unlock(&init_mm.page_table_lock);
				pages++;
			} else {
				/* If here, we are freeing vmemmap pages. */
				memset((void *)addr, PAGE_INUSE, next - addr);

				page_addr = page_address(pud_page(*pud));
				if (!memchr_inv(page_addr, PAGE_INUSE,
						PUD_SIZE)) {
					free_pagetable(pud_page(*pud),
						       get_order(PUD_SIZE));

					spin_lock(&init_mm.page_table_lock);
					pud_clear(pud);
					spin_unlock(&init_mm.page_table_lock);
				}
			}

			continue;
		}

		pmd_base = pmd_offset(pud, 0);
		remove_pmd_table(pmd_base, addr, next, direct);
		free_pmd_table(pmd_base, pud);
	}

	if (direct)
		update_page_count(PG_LEVEL_1G, -pages);
}

static void __meminit
remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
		 bool direct)
{
	unsigned long next, pages = 0;
	pud_t *pud_base;
	p4d_t *p4d;

	p4d = p4d_start + p4d_index(addr);
	for (; addr < end; addr = next, p4d++) {
		next = p4d_addr_end(addr, end);

		if (!p4d_present(*p4d))
			continue;

		BUILD_BUG_ON(p4d_large(*p4d));

		pud_base = pud_offset(p4d, 0);
		remove_pud_table(pud_base, addr, next, direct);
		free_pud_table(pud_base, p4d);
	}

	if (direct)
		update_page_count(PG_LEVEL_512G, -pages);
}

/* start and end are both virtual addresses. */
static void __meminit
remove_pagetable(unsigned long start, unsigned long end, bool direct)
{
	unsigned long next;
	unsigned long addr;
	pgd_t *pgd;
	p4d_t *p4d;

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		p4d = p4d_offset(pgd, 0);
		remove_p4d_table(p4d, addr, next, direct);
	}

	flush_tlb_all();
}

void __ref vmemmap_free(unsigned long start, unsigned long end)
{
	remove_pagetable(start, end, false);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
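/* Tear down the direct mapping for a physical address range. */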
static void __meminit
kernel_physical_mapping_remove(unsigned long start, unsigned long end)
{
	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	remove_pagetable(start, end, true);
}

int __ref arch_remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct page *page = pfn_to_page(start_pfn);
	struct vmem_altmap *altmap;
	struct zone *zone;
	int ret;

	/* With altmap the first mapped page is offset from @start */
	altmap = to_vmem_altmap((unsigned long) page);
	if (altmap)
		page += vmem_altmap_offset(altmap);
	zone = page_zone(page);
	ret = __remove_pages(zone, start_pfn, nr_pages);
	WARN_ON_ONCE(ret);
	kernel_physical_mapping_remove(start, start + size);

	return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */

static struct kcore_list kcore_vsyscall;

static void __init register_page_bootmem_info(void)
{
#ifdef CONFIG_NUMA
	int i;

	for_each_online_node(i)
		register_page_bootmem_info_node(NODE_DATA(i));
#endif
}

void __init mem_init(void)
{
	pci_iommu_alloc();

	/* clear_bss() already cleared the empty_zero_page */

	register_page_bootmem_info();

	/* this will put all memory onto the freelists */
	free_all_bootmem();
	after_bootmem = 1;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR,
			 PAGE_SIZE, KCORE_OTHER);

	mem_init_print_info(NULL);
}

int kernel_set_to_readonly;

void set_kernel_text_rw(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = PFN_ALIGN(__stop___ex_table);

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read write\n",
		 start, end);

	/*
	 * Make the kernel identity mapping for text RW. Kernel text
	 * mapping will always be RO. Refer to the comment in
	 * static_protections() in pageattr.c
	 */
	set_memory_rw(start, (end - start) >> PAGE_SHIFT);
}

void set_kernel_text_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = PFN_ALIGN(__stop___ex_table);

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read only\n",
		 start, end);

	/*
	 * Set the kernel identity mapping for text RO.
	 */
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long rodata_start = PFN_ALIGN(__start_rodata);
	unsigned long end = (unsigned long) &__end_rodata_hpage_align;
	unsigned long text_end = PFN_ALIGN(&__stop___ex_table);
	unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
	unsigned long all_end;

	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
	       (end - start) >> 10);
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);

	kernel_set_to_readonly = 1;

	/*
	 * The rodata/data/bss/brk section (but not the kernel text!)
	 * should also be not-executable.
	 *
	 * We align all_end to PMD_SIZE because the existing mapping
	 * is a full PMD. If we would align _brk_end to PAGE_SIZE we
	 * split the PMD and the remainder between _brk_end and the end
	 * of the PMD will remain mapped executable.
	 *
	 * Any PMD which was setup after the one which covers _brk_end
	 * has been zapped already via cleanup_highmap().
	 */
	all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
	set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
	set_memory_rw(start, (end-start) >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: again\n");
	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif

	free_init_pages("unused kernel",
			(unsigned long) __va(__pa_symbol(text_end)),
			(unsigned long) __va(__pa_symbol(rodata_start)));
	free_init_pages("unused kernel",
			(unsigned long) __va(__pa_symbol(rodata_end)),
			(unsigned long) __va(__pa_symbol(_sdata)));

	debug_checkwx();
}

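/*
 * Walk the kernel page tables and check whether @addr is backed by a
 * present 4K, 2M or 1G mapping.
 */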
int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return 0;

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_large(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}

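/*
 * Pick the granularity of the /sys/devices/system/memory blocks:
 * 2GB on UV systems and machines with 64GB+ of RAM, else the minimum.
 */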
static unsigned long probe_memory_block_size(void)
{
	unsigned long bz = MIN_MEMORY_BLOCK_SIZE;

	/* if system is UV or has 64GB of RAM or more, use large blocks */
	if (is_uv_system() || ((max_pfn << PAGE_SHIFT) >= (64UL << 30)))
		bz = 2UL << 30; /* 2GB */

	pr_info("x86/mm: Memory block size: %ldMB\n", bz >> 20);

	return bz;
}

static unsigned long memory_block_size_probed;
unsigned long memory_block_size_bytes(void)
{
	if (!memory_block_size_probed)
		memory_block_size_probed = probe_memory_block_size();

	return memory_block_size_probed;
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;

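/*
 * Back the vmemmap with 2MB pages where possible; the loop below falls
 * back to base pages when a PMD-sized allocation cannot be satisfied.
 */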
static int __meminit vmemmap_populate_hugepages(unsigned long start,
		unsigned long end, int node, struct vmem_altmap *altmap)
{
	unsigned long addr;
	unsigned long next;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	for (addr = start; addr < end; addr = next) {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		p4d = vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;

		pud = vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p;

			p = __vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
			if (p) {
				pte_t entry;

				entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
						PAGE_KERNEL_LARGE);
				set_pmd(pmd, __pmd(pte_val(entry)));

				/* check to see if we have contiguous blocks */
				if (p_end != p || node_start != node) {
					if (p_start)
						pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
						       addr_start, addr_end-1, p_start, p_end-1, node_start);
					addr_start = addr;
					node_start = node;
					p_start = p;
				}

				addr_end = addr + PMD_SIZE;
				p_end = p + PMD_SIZE;
				continue;
			} else if (altmap)
				return -ENOMEM; /* no fallback */
		} else if (pmd_large(*pmd)) {
			vmemmap_verify((pte_t *)pmd, node, addr, next);
			continue;
		}
		pr_warn_once("vmemmap: falling back to regular page backing\n");
		if (vmemmap_populate_basepages(addr, next, node))
			return -ENOMEM;
	}
	return 0;
}

int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	struct vmem_altmap *altmap = to_vmem_altmap(start);
	int err;

	if (boot_cpu_has(X86_FEATURE_PSE))
		err = vmemmap_populate_hugepages(start, end, node, altmap);
	else if (altmap) {
		pr_err_once("%s: no cpu support for altmap allocations\n",
				__func__);
		err = -ENOMEM;
	} else
		err = vmemmap_populate_basepages(start, end, node);
	if (!err)
		sync_global_pgds(start, end - 1);
	return err;
}

#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE)
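/*
 * Mark the pages backing a section's memmap (and the page-table pages
 * mapping them) with bootmem info so memory hot-remove can recognize
 * them.
 */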
void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long size)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + size);
	unsigned long next;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	unsigned int nr_pages;
	struct page *page;

	for (; addr < end; addr = next) {
		pte_t *pte = NULL;

		pgd = pgd_offset_k(addr);
		if (pgd_none(*pgd)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, pgd_page(*pgd), MIX_SECTION_INFO);

		p4d = p4d_offset(pgd, addr);
		if (p4d_none(*p4d)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, p4d_page(*p4d), MIX_SECTION_INFO);

		pud = pud_offset(p4d, addr);
		if (pud_none(*pud)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);

		if (!boot_cpu_has(X86_FEATURE_PSE)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd))
				continue;
			get_page_bootmem(section_nr, pmd_page(*pmd),
					 MIX_SECTION_INFO);

			pte = pte_offset_kernel(pmd, addr);
			if (pte_none(*pte))
				continue;
			get_page_bootmem(section_nr, pte_page(*pte),
					 SECTION_INFO);
		} else {
			next = pmd_addr_end(addr, end);

			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd))
				continue;

			nr_pages = 1 << (get_order(PMD_SIZE));
			page = pmd_page(*pmd);
			while (nr_pages--)
				get_page_bootmem(section_nr, page++,
						 SECTION_INFO);
		}
	}
}
#endif

void __meminit vmemmap_populate_print_last(void)
{
	if (p_start) {
		pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
			addr_start, addr_end-1, p_start, p_end-1, node_start);
		p_start = NULL;
		p_end = NULL;
		node_start = 0;
	}
}
#endif