// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/pgtable.h>
#include <linux/bootmem_info.h>

#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
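
/*
 * As noted in the header comment, a virtually mapped memmap turns the
 * pfn <-> struct page conversions into plain base-offset arithmetic.  The
 * helpers below only illustrate that calculation (the real definitions live
 * in include/asm-generic/memory_model.h); they are not used in this file.
 */
static inline struct page *vmemmap_example_pfn_to_page(unsigned long pfn)
{
	return vmemmap + pfn;	/* pure pointer arithmetic, no memory access */
}

static inline unsigned long vmemmap_example_page_to_pfn(struct page *page)
{
	return (unsigned long)(page - vmemmap);
}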

/**
 * struct vmemmap_remap_walk - walk vmemmap page table
 *
 * @remap_pte:		called for each lowest-level entry (PTE).
 * @reuse_page:		the page which is reused for the tail vmemmap pages.
 * @reuse_addr:		the virtual address of the @reuse_page page.
 * @vmemmap_pages:	the list head of the vmemmap pages that can be freed.
 */
struct vmemmap_remap_walk {
	void (*remap_pte)(pte_t *pte, unsigned long addr,
			  struct vmemmap_remap_walk *walk);
	struct page *reuse_page;
	unsigned long reuse_addr;
	struct list_head *vmemmap_pages;
};

static void vmemmap_pte_range(pmd_t *pmd, unsigned long addr,
			      unsigned long end,
			      struct vmemmap_remap_walk *walk)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);

	/*
	 * The reuse_page is found 'first' in the table walk, before we start
	 * remapping (i.e. before @walk->remap_pte is called).
	 */
	if (!walk->reuse_page) {
		walk->reuse_page = pte_page(*pte);
		/*
		 * Because the reuse address is part of the range that we are
		 * walking, skip the reuse address range.
		 */
		addr += PAGE_SIZE;
		pte++;
	}

	for (; addr != end; addr += PAGE_SIZE, pte++)
		walk->remap_pte(pte, addr, walk);
}

static void vmemmap_pmd_range(pud_t *pud, unsigned long addr,
			      unsigned long end,
			      struct vmemmap_remap_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		BUG_ON(pmd_leaf(*pmd));

		next = pmd_addr_end(addr, end);
		vmemmap_pte_range(pmd, addr, next, walk);
	} while (pmd++, addr = next, addr != end);
}

static void vmemmap_pud_range(p4d_t *p4d, unsigned long addr,
			      unsigned long end,
			      struct vmemmap_remap_walk *walk)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		vmemmap_pmd_range(pud, addr, next, walk);
	} while (pud++, addr = next, addr != end);
}

static void vmemmap_p4d_range(pgd_t *pgd, unsigned long addr,
			      unsigned long end,
			      struct vmemmap_remap_walk *walk)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		vmemmap_pud_range(p4d, addr, next, walk);
	} while (p4d++, addr = next, addr != end);
}

static void vmemmap_remap_range(unsigned long start, unsigned long end,
				struct vmemmap_remap_walk *walk)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;

	VM_BUG_ON(!IS_ALIGNED(start, PAGE_SIZE));
	VM_BUG_ON(!IS_ALIGNED(end, PAGE_SIZE));

	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		vmemmap_p4d_range(pgd, addr, next, walk);
	} while (pgd++, addr = next, addr != end);

	/*
	 * We only change the mapping of the vmemmap virtual address range
	 * [@start + PAGE_SIZE, end), so we only need to flush the TLB for
	 * that range.
	 */
	flush_tlb_kernel_range(start + PAGE_SIZE, end);
}

/*
 * Free a vmemmap page. A vmemmap page can be allocated from the memblock
 * allocator or the buddy allocator. If the PG_reserved flag is set, it means
 * that it was allocated from the memblock allocator and should be freed via
 * free_bootmem_page(). Otherwise, use __free_page().
 */
static inline void free_vmemmap_page(struct page *page)
{
	if (PageReserved(page))
		free_bootmem_page(page);
	else
		__free_page(page);
}

/* Free a list of the vmemmap pages */
static void free_vmemmap_page_list(struct list_head *list)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);
		free_vmemmap_page(page);
	}
}

static void vmemmap_remap_pte(pte_t *pte, unsigned long addr,
			      struct vmemmap_remap_walk *walk)
{
	/*
	 * Remap the tail pages as read-only to catch illegal write operations
	 * to the tail pages.
	 */
	pgprot_t pgprot = PAGE_KERNEL_RO;
	pte_t entry = mk_pte(walk->reuse_page, pgprot);
	struct page *page = pte_page(*pte);

	list_add(&page->lru, walk->vmemmap_pages);
	set_pte_at(&init_mm, addr, pte, entry);
}

/**
 * vmemmap_remap_free - remap the vmemmap virtual address range [@start, @end)
 *			to the page which @reuse is mapped to, then free the
 *			vmemmap pages which the range was mapped to.
 * @start:	start address of the vmemmap virtual address range that we want
 *		to remap.
 * @end:	end address of the vmemmap virtual address range that we want to
 *		remap.
 * @reuse:	reuse address.
 *
 * Note: This function depends on vmemmap being base page mapped. Please make
 * sure that the PMD mapping of vmemmap pages is disabled before calling this
 * function.
 */
void vmemmap_remap_free(unsigned long start, unsigned long end,
			unsigned long reuse)
{
	LIST_HEAD(vmemmap_pages);
	struct vmemmap_remap_walk walk = {
		.remap_pte	= vmemmap_remap_pte,
		.reuse_addr	= reuse,
		.vmemmap_pages	= &vmemmap_pages,
	};

	/*
	 * In order to make the remapping routine most efficient for huge
	 * pages, the vmemmap page table walking code follows these rules
	 * (see vmemmap_pte_range() for more details):
	 *
	 * - The range [@start, @end) and the range [@reuse, @reuse + PAGE_SIZE)
	 *   must be contiguous.
	 * - The @reuse address is part of the range [@reuse, @end) that we are
	 *   walking, which is passed to vmemmap_remap_range().
	 * - The @reuse address is the first address of the complete range.
	 *
	 * So we need to make sure that @start and @reuse meet the above rules.
	 */
	BUG_ON(start - reuse != PAGE_SIZE);

	vmemmap_remap_range(reuse, end, &walk);
	free_vmemmap_page_list(&vmemmap_pages);
}
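
/*
 * Example (an illustrative sketch, not used in this file): a caller such as
 * the HugeTLB vmemmap optimisation is expected to pass a @reuse address that
 * sits exactly one page below @start.  The hypothetical helper below would
 * remap all but the first vmemmap page of a compound page to that first page
 * and free the pages that backed the remainder.  It assumes the compound
 * page's vmemmap starts on a page boundary and spans whole pages, e.g. a 2MB
 * HugeTLB page with 4KB base pages needs 512 * sizeof(struct page) of
 * vmemmap, i.e. 8 pages with a 64-byte struct page.
 */
static inline void vmemmap_remap_free_example(struct page *head,
					      unsigned int order)
{
	unsigned long vmemmap_start = (unsigned long)head;
	unsigned long vmemmap_size = (1UL << order) * sizeof(struct page);
	unsigned long reuse = vmemmap_start;		  /* keep the first page mapped */
	unsigned long start = vmemmap_start + PAGE_SIZE;  /* start - reuse == PAGE_SIZE */
	unsigned long end = vmemmap_start + vmemmap_size;

	vmemmap_remap_free(start, end, reuse);
}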

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */
static void * __ref __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return memblock_alloc_try_nid_raw(size, align, goal,
					       MEMBLOCK_ALLOC_ACCESSIBLE, node);
}

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up use that, fall back to bootmem. */
	if (slab_is_available()) {
		gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
		int order = get_order(size);
		static bool warned;
		struct page *page;

		page = alloc_pages_node(node, gfp_mask, order);
		if (page)
			return page_address(page);

		if (!warned) {
			warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
				   "vmemmap alloc failure: order:%u", order);
			warned = true;
		}
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}

static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap);

/* Callers need to make sure that @size is the same during the early stage. */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node,
					 struct vmem_altmap *altmap)
{
	void *ptr;

	if (altmap)
		return altmap_alloc_block_buf(size, altmap);

	ptr = sparse_buffer_alloc(size);
	if (!ptr)
		ptr = vmemmap_alloc_block(size, node);
	return ptr;
}

static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
	return altmap->base_pfn + altmap->reserve + altmap->alloc
		+ altmap->align;
}

static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
	unsigned long allocated = altmap->alloc + altmap->align;

	if (altmap->free > allocated)
		return altmap->free - allocated;
	return 0;
}

static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap)
{
	unsigned long pfn, nr_pfns, nr_align;

	if (size & ~PAGE_MASK) {
		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
				__func__, size);
		return NULL;
	}

	pfn = vmem_altmap_next_pfn(altmap);
	nr_pfns = size >> PAGE_SHIFT;
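	/*
	 * Align the allocation to the largest power-of-two factor of
	 * nr_pfns, so that e.g. a PMD-sized request (512 pfns with 4KB base
	 * pages) starts on a 512-pfn boundary.  nr_align first holds that
	 * alignment and is then reduced to the number of padding pfns.
	 */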
	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
	nr_align = ALIGN(pfn, nr_align) - pfn;

	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
		return NULL;

	altmap->alloc += nr_pfns;
	altmap->align += nr_align;
	pfn += nr_align;

	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
			__func__, pfn, altmap->alloc, altmap->align, nr_pfns);
	return __va(__pfn_to_phys(pfn));
}

void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		pr_warn("[%lx-%lx] potential offnode page_structs\n",
			start, end - 1);
}

pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
				       struct vmem_altmap *altmap)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p;

		p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap);
		if (!p)
			return NULL;
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
{
	void *p = vmemmap_alloc_block(size, node);

	if (!p)
		return NULL;
	memset(p, 0, size);

	return p;
}

pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		p4d_populate(&init_mm, p4d, p);
	}
	return p4d;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
					 int node, struct vmem_altmap *altmap)
{
	unsigned long addr = start;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		p4d = vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;
		pud = vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node, altmap);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}

	return 0;
}
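
/*
 * An architecture without special vmemmap requirements can implement its
 * vmemmap_populate() as a thin wrapper around the helper above, roughly
 * (a sketch of typical arch code, which lives outside this file):
 *
 *	int __meminit vmemmap_populate(unsigned long start, unsigned long end,
 *				       int node, struct vmem_altmap *altmap)
 *	{
 *		return vmemmap_populate_basepages(start, end, node, altmap);
 *	}
 */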

struct page * __meminit __populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	unsigned long start = (unsigned long) pfn_to_page(pfn);
	unsigned long end = start + nr_pages * sizeof(struct page);
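	/*
	 * For example, with 4KB base pages and a 64-byte struct page, a 2MB
	 * subsection (512 pfns) needs 32KB of memmap and a full 128MB
	 * section (the x86_64 default) needs 2MB.
	 */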

	if (WARN_ON_ONCE(!IS_ALIGNED(pfn, PAGES_PER_SUBSECTION) ||
		!IS_ALIGNED(nr_pages, PAGES_PER_SUBSECTION)))
		return NULL;

	if (vmemmap_populate(start, end, nid, altmap))
		return NULL;

	return pfn_to_page(pfn);
}