// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/pgtable.h>
#include <linux/bootmem_info.h>

#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/**
 * struct vmemmap_remap_walk - walk vmemmap page table
 *
 * @remap_pte:		called for each lowest-level entry (PTE).
 * @nr_walked:		the number of PTEs walked.
 * @reuse_page:		the page which is reused for the tail vmemmap pages.
 * @reuse_addr:		the virtual address of the @reuse_page page.
 * @vmemmap_pages:	the list head of the vmemmap pages that can be freed
 *			or are mapped from.
 */
struct vmemmap_remap_walk {
	void (*remap_pte)(pte_t *pte, unsigned long addr,
			  struct vmemmap_remap_walk *walk);
	unsigned long nr_walked;
	struct page *reuse_page;
	unsigned long reuse_addr;
	struct list_head *vmemmap_pages;
};

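/*
 * Split a vmemmap huge PMD mapping into a PTE-level page table that maps
 * the same pages, so that the individual vmemmap pages covered by the PMD
 * can be remapped one at a time later on.
 */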
static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start,
				  struct vmemmap_remap_walk *walk)
{
	pmd_t __pmd;
	int i;
	unsigned long addr = start;
	struct page *page = pmd_page(*pmd);
	pte_t *pgtable = pte_alloc_one_kernel(&init_mm);

	if (!pgtable)
		return -ENOMEM;

	pmd_populate_kernel(&init_mm, &__pmd, pgtable);

	for (i = 0; i < PMD_SIZE / PAGE_SIZE; i++, addr += PAGE_SIZE) {
		pte_t entry, *pte;
		pgprot_t pgprot = PAGE_KERNEL;

		entry = mk_pte(page + i, pgprot);
		pte = pte_offset_kernel(&__pmd, addr);
		set_pte_at(&init_mm, addr, pte, entry);
	}

	/* Make pte visible before pmd. See comment in pmd_install(). */
	smp_wmb();
	pmd_populate_kernel(&init_mm, pmd, pgtable);

	flush_tlb_kernel_range(start, start + PMD_SIZE);

	return 0;
}

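/*
 * Walk the PTEs of one PMD range and call @walk->remap_pte on each entry.
 * The very first PTE of the whole walk is recorded as the reuse page and
 * skipped instead of being remapped.
 */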
static void vmemmap_pte_range(pmd_t *pmd, unsigned long addr,
			      unsigned long end,
			      struct vmemmap_remap_walk *walk)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);

	/*
	 * The reuse_page is found 'first' in table walk before we start
	 * remapping (which is calling @walk->remap_pte).
	 */
	if (!walk->reuse_page) {
		walk->reuse_page = pte_page(*pte);
		/*
		 * Because the reuse address is part of the range that we are
		 * walking, skip the reuse address range.
		 */
		addr += PAGE_SIZE;
		pte++;
		walk->nr_walked++;
	}

	for (; addr != end; addr += PAGE_SIZE, pte++) {
		walk->remap_pte(pte, addr, walk);
		walk->nr_walked++;
	}
}

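/*
 * Walk the PMDs of one PUD range, splitting any huge PMD mapping before
 * descending to the PTE level.
 */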
static int vmemmap_pmd_range(pud_t *pud, unsigned long addr,
			     unsigned long end,
			     struct vmemmap_remap_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		if (pmd_leaf(*pmd)) {
			int ret;

			ret = split_vmemmap_huge_pmd(pmd, addr & PMD_MASK, walk);
			if (ret)
				return ret;
		}
		next = pmd_addr_end(addr, end);
		vmemmap_pte_range(pmd, addr, next, walk);
	} while (pmd++, addr = next, addr != end);

	return 0;
}

static int vmemmap_pud_range(p4d_t *p4d, unsigned long addr,
			     unsigned long end,
			     struct vmemmap_remap_walk *walk)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(p4d, addr);
	do {
		int ret;

		next = pud_addr_end(addr, end);
		ret = vmemmap_pmd_range(pud, addr, next, walk);
		if (ret)
			return ret;
	} while (pud++, addr = next, addr != end);

	return 0;
}

static int vmemmap_p4d_range(pgd_t *pgd, unsigned long addr,
			     unsigned long end,
			     struct vmemmap_remap_walk *walk)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		int ret;

		next = p4d_addr_end(addr, end);
		ret = vmemmap_pud_range(p4d, addr, next, walk);
		if (ret)
			return ret;
	} while (p4d++, addr = next, addr != end);

	return 0;
}

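/*
 * Walk the vmemmap page tables for the range [@start, @end) and apply
 * @walk->remap_pte to every PTE, then flush the TLB for the part of the
 * range that was actually remapped (everything but the reuse page).
 */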
static int vmemmap_remap_range(unsigned long start, unsigned long end,
			       struct vmemmap_remap_walk *walk)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;

	VM_BUG_ON(!IS_ALIGNED(start, PAGE_SIZE));
	VM_BUG_ON(!IS_ALIGNED(end, PAGE_SIZE));

	pgd = pgd_offset_k(addr);
	do {
		int ret;

		next = pgd_addr_end(addr, end);
		ret = vmemmap_p4d_range(pgd, addr, next, walk);
		if (ret)
			return ret;
	} while (pgd++, addr = next, addr != end);

	/*
	 * We only change the mapping of the vmemmap virtual address range
	 * [@start + PAGE_SIZE, end), so we only need to flush the TLB which
	 * belongs to the range.
	 */
	flush_tlb_kernel_range(start + PAGE_SIZE, end);

	return 0;
}

/*
 * Free a vmemmap page. A vmemmap page can be allocated from the memblock
 * allocator or the buddy allocator. If the PG_reserved flag is set, it means
 * that it was allocated from the memblock allocator, so free it via
 * free_bootmem_page(). Otherwise, use __free_page().
 */
static inline void free_vmemmap_page(struct page *page)
{
	if (PageReserved(page))
		free_bootmem_page(page);
	else
		__free_page(page);
}

/* Free a list of the vmemmap pages */
static void free_vmemmap_page_list(struct list_head *list)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);
		free_vmemmap_page(page);
	}
}

static void vmemmap_remap_pte(pte_t *pte, unsigned long addr,
			      struct vmemmap_remap_walk *walk)
{
	/*
	 * Remap the tail pages as read-only to catch illegal write operation
	 * to the tail pages.
	 */
	pgprot_t pgprot = PAGE_KERNEL_RO;
	pte_t entry = mk_pte(walk->reuse_page, pgprot);
	struct page *page = pte_page(*pte);

	list_add_tail(&page->lru, walk->vmemmap_pages);
	set_pte_at(&init_mm, addr, pte, entry);
}

/*
 * How many struct page structs need to be reset. When we reuse the head
 * struct page, the special metadata (e.g. page->flags or page->mapping)
 * cannot be copied to the tail struct page structs. The invalid values will
 * be caught by free_tail_pages_check(). In order to avoid the message
 * "corrupted mapping in tail page", we need to reset at least 3 struct page
 * structs (one head struct page and two tail struct pages).
 */
#define NR_RESET_STRUCT_PAGE		3

static inline void reset_struct_pages(struct page *start)
{
	int i;
	struct page *from = start + NR_RESET_STRUCT_PAGE;

	for (i = 0; i < NR_RESET_STRUCT_PAGE; i++)
		memcpy(start + i, from, sizeof(*from));
}

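/*
 * Restore one vmemmap PTE: take a page from @vmemmap_pages, fill it with a
 * copy of the reuse page, reset the first few struct pages in the copy and
 * map the page read-write again.
 */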
static void vmemmap_restore_pte(pte_t *pte, unsigned long addr,
				struct vmemmap_remap_walk *walk)
{
	pgprot_t pgprot = PAGE_KERNEL;
	struct page *page;
	void *to;

	BUG_ON(pte_page(*pte) != walk->reuse_page);

	page = list_first_entry(walk->vmemmap_pages, struct page, lru);
	list_del(&page->lru);
	to = page_to_virt(page);
	copy_page(to, (void *)walk->reuse_addr);
	reset_struct_pages(to);

	set_pte_at(&init_mm, addr, pte, mk_pte(page, pgprot));
}

/**
 * vmemmap_remap_free - remap the vmemmap virtual address range [@start, @end)
 *			to the page which @reuse is mapped to, then free the
 *			vmemmap pages which the range was mapped to.
 * @start:	start address of the vmemmap virtual address range that we want
 *		to remap.
 * @end:	end address of the vmemmap virtual address range that we want to
 *		remap.
 * @reuse:	reuse address.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int vmemmap_remap_free(unsigned long start, unsigned long end,
		       unsigned long reuse)
{
	int ret;
	LIST_HEAD(vmemmap_pages);
	struct vmemmap_remap_walk walk = {
		.remap_pte	= vmemmap_remap_pte,
		.reuse_addr	= reuse,
		.vmemmap_pages	= &vmemmap_pages,
	};

	/*
	 * In order to make the remapping routine most efficient for huge pages,
	 * the vmemmap page table walking has the following rules (see
	 * vmemmap_pte_range() for more details):
	 *
	 * - The range [@start, @end) and the range [@reuse, @reuse + PAGE_SIZE)
	 *   should be contiguous.
	 * - The @reuse address is part of the range [@reuse, @end) that we are
	 *   walking, which is passed to vmemmap_remap_range().
	 * - The @reuse address is the first in the complete range.
	 *
	 * So we need to make sure that @start and @reuse meet the above rules.
	 */
	BUG_ON(start - reuse != PAGE_SIZE);

	mmap_write_lock(&init_mm);
	ret = vmemmap_remap_range(reuse, end, &walk);
	mmap_write_downgrade(&init_mm);

	if (ret && walk.nr_walked) {
		end = reuse + walk.nr_walked * PAGE_SIZE;
		/*
		 * vmemmap_pages contains pages from the previous
		 * vmemmap_remap_range call which failed.  These
		 * are pages which were removed from the vmemmap.
		 * They will be restored in the following call.
		 */
		walk = (struct vmemmap_remap_walk) {
			.remap_pte	= vmemmap_restore_pte,
			.reuse_addr	= reuse,
			.vmemmap_pages	= &vmemmap_pages,
		};

		vmemmap_remap_range(reuse, end, &walk);
	}
	mmap_read_unlock(&init_mm);

	free_vmemmap_page_list(&vmemmap_pages);

	return ret;
}
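
/*
 * Illustrative sketch (not part of this file): a caller that wants to keep
 * only the first vmemmap page of a range and remap the rest of the range to
 * it could do, for some hypothetical vmemmap_start/vmemmap_end pair,
 *
 *	vmemmap_remap_free(vmemmap_start + PAGE_SIZE, vmemmap_end,
 *			   vmemmap_start);
 *
 * which satisfies the "start == reuse + PAGE_SIZE" rule checked above.
 */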

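/*
 * Allocate one page for each vmemmap page in [@start, @end) on the node of
 * the memory that the vmemmap range describes and chain them on @list.  On
 * failure the partially built list is freed and -ENOMEM is returned.
 */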
static int alloc_vmemmap_page_list(unsigned long start, unsigned long end,
				   gfp_t gfp_mask, struct list_head *list)
{
	unsigned long nr_pages = (end - start) >> PAGE_SHIFT;
	int nid = page_to_nid((struct page *)start);
	struct page *page, *next;

	while (nr_pages--) {
		page = alloc_pages_node(nid, gfp_mask, 0);
		if (!page)
			goto out;
		list_add_tail(&page->lru, list);
	}

	return 0;
out:
	list_for_each_entry_safe(page, next, list, lru)
		__free_pages(page, 0);
	return -ENOMEM;
}

/**
 * vmemmap_remap_alloc - remap the vmemmap virtual address range [@start, @end)
 *			 to freshly allocated vmemmap pages, one page for each
 *			 PTE in the range.
 * @start:	start address of the vmemmap virtual address range that we want
 *		to remap.
 * @end:	end address of the vmemmap virtual address range that we want to
 *		remap.
 * @reuse:	reuse address.
 * @gfp_mask:	GFP flag for allocating vmemmap pages.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int vmemmap_remap_alloc(unsigned long start, unsigned long end,
			unsigned long reuse, gfp_t gfp_mask)
{
	LIST_HEAD(vmemmap_pages);
	struct vmemmap_remap_walk walk = {
		.remap_pte	= vmemmap_restore_pte,
		.reuse_addr	= reuse,
		.vmemmap_pages	= &vmemmap_pages,
	};

	/* See the comment in the vmemmap_remap_free(). */
	BUG_ON(start - reuse != PAGE_SIZE);

	if (alloc_vmemmap_page_list(start, end, gfp_mask, &vmemmap_pages))
		return -ENOMEM;

	mmap_read_lock(&init_mm);
	vmemmap_remap_range(reuse, end, &walk);
	mmap_read_unlock(&init_mm);

	return 0;
}

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */
static void * __ref __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return memblock_alloc_try_nid_raw(size, align, goal,
					       MEMBLOCK_ALLOC_ACCESSIBLE, node);
}

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up use that, fallback to bootmem. */
	if (slab_is_available()) {
		gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
		int order = get_order(size);
		static bool warned;
		struct page *page;

		page = alloc_pages_node(node, gfp_mask, order);
		if (page)
			return page_address(page);

		if (!warned) {
			warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
				   "vmemmap alloc failure: order:%u", order);
			warned = true;
		}
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}

static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap);

/* need to make sure size is all the same during early stage */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node,
					 struct vmem_altmap *altmap)
{
	void *ptr;

	if (altmap)
		return altmap_alloc_block_buf(size, altmap);

	ptr = sparse_buffer_alloc(size);
	if (!ptr)
		ptr = vmemmap_alloc_block(size, node);
	return ptr;
}

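/*
 * Helpers for allocating vmemmap backing from a struct vmem_altmap, i.e.
 * from a pre-reserved pfn range (typically device memory) instead of from
 * regular system RAM.
 */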
static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
	return altmap->base_pfn + altmap->reserve + altmap->alloc
		+ altmap->align;
}

static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
	unsigned long allocated = altmap->alloc + altmap->align;

	if (altmap->free > allocated)
		return altmap->free - allocated;
	return 0;
}

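/*
 * Allocate @size bytes (a whole number of pages) from the altmap reservation,
 * honouring the natural alignment of the request, or return NULL if the
 * reservation is exhausted.
 */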
static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap)
{
	unsigned long pfn, nr_pfns, nr_align;

	if (size & ~PAGE_MASK) {
		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
				__func__, size);
		return NULL;
	}

	pfn = vmem_altmap_next_pfn(altmap);
	nr_pfns = size >> PAGE_SHIFT;
	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
	nr_align = ALIGN(pfn, nr_align) - pfn;
	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
		return NULL;

	altmap->alloc += nr_pfns;
	altmap->align += nr_align;
	pfn += nr_align;

	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
			__func__, pfn, altmap->alloc, altmap->align, nr_pfns);
	return __va(__pfn_to_phys(pfn));
}

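/*
 * Warn if the page backing this vmemmap PTE lives on a node that is far
 * away from the node whose memory it describes.
 */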
void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		pr_warn("[%lx-%lx] potential offnode page_structs\n",
			start, end - 1);
}

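/*
 * Populate one vmemmap PTE, backing it with a page from @altmap if one is
 * provided or with a freshly allocated page otherwise.  Already-populated
 * entries are left untouched.
 */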
pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
				       struct vmem_altmap *altmap)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p;

		p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap);
		if (!p)
			return NULL;
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
{
	void *p = vmemmap_alloc_block(size, node);

	if (!p)
		return NULL;
	memset(p, 0, size);

	return p;
}

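/*
 * The helpers below populate the intermediate page table levels (PMD, PUD,
 * P4D, PGD) on demand, each level backed by a zeroed page.
 */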
pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		p4d_populate(&init_mm, p4d, p);
	}
	return p4d;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

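/*
 * Populate the vmemmap range [@start, @end) with base pages, allocating any
 * missing page table levels along the way.
 */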
int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
					 int node, struct vmem_altmap *altmap)
{
	unsigned long addr = start;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		p4d = vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;
		pud = vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node, altmap);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}

	return 0;
}

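/*
 * Populate the memmap for a subsection-aligned pfn range and return the
 * struct page for the first pfn, or NULL on failure or misaligned input.
 */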
struct page * __meminit __populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	unsigned long start = (unsigned long) pfn_to_page(pfn);
	unsigned long end = start + nr_pages * sizeof(struct page);

	if (WARN_ON_ONCE(!IS_ALIGNED(pfn, PAGES_PER_SUBSECTION) ||
		!IS_ALIGNED(nr_pages, PAGES_PER_SUBSECTION)))
		return NULL;

	if (vmemmap_populate(start, end, nid, altmap))
		return NULL;

	return pfn_to_page(pfn);
}