// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/mm/hugetlbpage.c
 *
 * Copyright (C) 2013 Linaro Ltd.
 *
 * Based on arch/x86/mm/hugetlbpage.c.
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

/*
 * HugeTLB Support Matrix
 *
 * ---------------------------------------------------
 * | Page Size | CONT PTE |  PMD  | CONT PMD |  PUD  |
 * ---------------------------------------------------
 * |     4K    |   64K    |   2M  |    32M   |   1G  |
 * |    16K    |    2M    |  32M  |     1G   |       |
 * |    64K    |    2M    | 512M  |    16G   |       |
 * ---------------------------------------------------
 */
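/*
 * The CONT sizes above follow from the number of entries covered by
 * one contiguous-bit hint at each level: 16 PTEs/16 PMDs with 4K
 * pages, 128 PTEs/32 PMDs with 16K pages and 32 PTEs/32 PMDs with
 * 64K pages (e.g. 4K granule: 16 * 4K = 64K, 16 * 2M = 32M).
 */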

/*
 * Reserve CMA areas for the largest supported gigantic
 * huge page when requested. Any other smaller gigantic
 * huge pages could still be served from those areas.
 */
#ifdef CONFIG_CMA
void __init arm64_hugetlb_cma_reserve(void)
{
	int order;

	if (pud_sect_supported())
		order = PUD_SHIFT - PAGE_SHIFT;
	else
		order = CONT_PMD_SHIFT - PAGE_SHIFT;

	/*
	 * HugeTLB CMA reservation is required for gigantic
	 * huge pages which could not be allocated via the
	 * page allocator. Just warn if there is any change
	 * breaking this assumption.
	 */
	WARN_ON(order <= MAX_ORDER);
	hugetlb_cma_reserve(order);
}
#endif /* CONFIG_CMA */
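/*
 * Usage note: arm64_hugetlb_cma_reserve() is expected to be called
 * early during boot from the arm64 mm init code; the amount reserved
 * comes from the hugetlb_cma= kernel command line parameter consumed
 * by hugetlb_cma_reserve().
 */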

static bool __hugetlb_valid_size(unsigned long size)
{
	switch (size) {
#ifndef __PAGETABLE_PMD_FOLDED
	case PUD_SIZE:
		return pud_sect_supported();
#endif
	case CONT_PMD_SIZE:
	case PMD_SIZE:
	case CONT_PTE_SIZE:
		return true;
	}

	return false;
}

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
bool arch_hugetlb_migration_supported(struct hstate *h)
{
	size_t pagesize = huge_page_size(h);

	if (!__hugetlb_valid_size(pagesize)) {
		pr_warn("%s: unrecognized huge page size 0x%lx\n",
			__func__, pagesize);
		return false;
	}
	return true;
}
#endif

int pmd_huge(pmd_t pmd)
{
	return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}

int pud_huge(pud_t pud)
{
#ifndef __PAGETABLE_PMD_FOLDED
	return pud_val(pud) && !(pud_val(pud) & PUD_TABLE_BIT);
#else
	return 0;
#endif
}

/*
 * Select all bits except the pfn
 */
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
}

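/*
 * Given a pte pointer into a contiguous set, work out which level it
 * lives at: if the walk from the pgd lands on the same slot, ptep is
 * really a pmd and the set is CONT_PMDS entries of PMD_SIZE each;
 * otherwise it is a set of CONT_PTES page-sized entries.
 */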
static int find_num_contig(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, size_t *pgsize)
{
	pgd_t *pgdp = pgd_offset(mm, addr);
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;

	*pgsize = PAGE_SIZE;
	p4dp = p4d_offset(pgdp, addr);
	pudp = pud_offset(p4dp, addr);
	pmdp = pmd_offset(pudp, addr);
	if ((pte_t *)pmdp == ptep) {
		*pgsize = PMD_SIZE;
		return CONT_PMDS;
	}
	return CONT_PTES;
}

static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
{
	int contig_ptes = 0;

	*pgsize = size;

	switch (size) {
#ifndef __PAGETABLE_PMD_FOLDED
	case PUD_SIZE:
		if (pud_sect_supported())
			contig_ptes = 1;
		break;
#endif
	case PMD_SIZE:
		contig_ptes = 1;
		break;
	case CONT_PMD_SIZE:
		*pgsize = PMD_SIZE;
		contig_ptes = CONT_PMDS;
		break;
	case CONT_PTE_SIZE:
		*pgsize = PAGE_SIZE;
		contig_ptes = CONT_PTES;
		break;
	}

	return contig_ptes;
}

/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step.
 */
static pte_t get_clear_flush(struct mm_struct *mm,
			     unsigned long addr,
			     pte_t *ptep,
			     unsigned long pgsize,
			     unsigned long ncontig)
{
	pte_t orig_pte = huge_ptep_get(ptep);
	bool valid = pte_valid(orig_pte);
	unsigned long i, saddr = addr;

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
		pte_t pte = ptep_get_and_clear(mm, addr, ptep);

		/*
		 * If HW_AFDBM is enabled, then the HW could turn on
		 * the dirty or accessed bit for any page in the set,
		 * so check them all.
		 */
		if (pte_dirty(pte))
			orig_pte = pte_mkdirty(orig_pte);

		if (pte_young(pte))
			orig_pte = pte_mkyoung(orig_pte);
	}

	if (valid) {
		struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);

		flush_tlb_range(&vma, saddr, addr);
	}
	return orig_pte;
}

/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step for use cases where the
 * original pte is not needed.
 */
static void clear_flush(struct mm_struct *mm,
			     unsigned long addr,
			     pte_t *ptep,
			     unsigned long pgsize,
			     unsigned long ncontig)
{
	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
	unsigned long i, saddr = addr;

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
		pte_clear(mm, addr, ptep);

	flush_tlb_range(&vma, saddr, addr);
}

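/*
 * Install a huge pte. Non-contiguous sizes take a single set_pte_at();
 * for the CONT sizes every entry in the set must be written, using
 * break-before-make (clear_flush() below) and stepping the pfn by one
 * granule (dpfn) per entry.
 */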
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, pte_t pte)
{
	size_t pgsize;
	int i;
	int ncontig;
	unsigned long pfn, dpfn;
	pgprot_t hugeprot;

	/*
	 * Code needs to be expanded to handle huge swap and migration
	 * entries. Needed for HUGETLB and MEMORY_FAILURE.
	 */
	WARN_ON(!pte_present(pte));

	if (!pte_cont(pte)) {
		set_pte_at(mm, addr, ptep, pte);
		return;
	}

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	pfn = pte_pfn(pte);
	dpfn = pgsize >> PAGE_SHIFT;
	hugeprot = pte_pgprot(pte);

	clear_flush(mm, addr, ptep, pgsize, ncontig);

	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
}

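/*
 * Swap and migration entries are never valid in hardware, so unlike
 * set_huge_pte_at() there is no break-before-make requirement here;
 * the entries can be written without any TLB maintenance.
 */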
void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
			  pte_t *ptep, pte_t pte, unsigned long sz)
{
	int i, ncontig;
	size_t pgsize;

	ncontig = num_contig_ptes(sz, &pgsize);

	for (i = 0; i < ncontig; i++, ptep++)
		set_pte(ptep, pte);
}

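/*
 * Allocate the page table levels needed for a huge mapping of size sz
 * at addr, and return the entry to install it in: the pud itself for
 * PUD_SIZE, a (possibly shared) pmd for PMD_SIZE, or the first slot of
 * the contiguous set for CONT_PTE_SIZE/CONT_PMD_SIZE.
 */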
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep = NULL;

	pgdp = pgd_offset(mm, addr);
	p4dp = p4d_offset(pgdp, addr);
	pudp = pud_alloc(mm, p4dp, addr);
	if (!pudp)
		return NULL;

	if (sz == PUD_SIZE) {
		ptep = (pte_t *)pudp;
	} else if (sz == (CONT_PTE_SIZE)) {
		pmdp = pmd_alloc(mm, pudp, addr);
		if (!pmdp)
			return NULL;

		WARN_ON(addr & (sz - 1));
		/*
		 * Note that if this code were ever ported to the
		 * 32-bit arm platform then it will cause trouble in
		 * the case where CONFIG_HIGHPTE is set, since there
		 * will be no pte_unmap() to correspond with this
		 * pte_alloc_map().
		 */
		ptep = pte_alloc_map(mm, pmdp, addr);
	} else if (sz == PMD_SIZE) {
		if (want_pmd_share(vma, addr) && pud_none(READ_ONCE(*pudp)))
			ptep = huge_pmd_share(mm, vma, addr, pudp);
		else
			ptep = (pte_t *)pmd_alloc(mm, pudp, addr);
	} else if (sz == (CONT_PMD_SIZE)) {
		pmdp = pmd_alloc(mm, pudp, addr);
		WARN_ON(addr & (sz - 1));
		return (pte_t *)pmdp;
	}

	return ptep;
}

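/*
 * Look up an existing huge pte without allocating anything. Returns
 * the (first) entry for the mapping at addr, or NULL if no table is
 * present; non-present leaf entries (swap/migration) are returned as
 * well, which is why !p*d_present() is checked alongside p*d_huge().
 */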
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;

	pgdp = pgd_offset(mm, addr);
	if (!pgd_present(READ_ONCE(*pgdp)))
		return NULL;

	p4dp = p4d_offset(pgdp, addr);
	if (!p4d_present(READ_ONCE(*p4dp)))
		return NULL;

	pudp = pud_offset(p4dp, addr);
	pud = READ_ONCE(*pudp);
	if (sz != PUD_SIZE && pud_none(pud))
		return NULL;
	/* hugepage or swap? */
	if (pud_huge(pud) || !pud_present(pud))
		return (pte_t *)pudp;
	/* table; check the next level */

	if (sz == CONT_PMD_SIZE)
		addr &= CONT_PMD_MASK;

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) &&
	    pmd_none(pmd))
		return NULL;
	if (pmd_huge(pmd) || !pmd_present(pmd))
		return (pte_t *)pmdp;

	if (sz == CONT_PTE_SIZE)
		return pte_offset_kernel(pmdp, (addr & CONT_PTE_MASK));

	return NULL;
}

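/*
 * Fix up the generic huge pte for this architecture: mark it huge,
 * and for the two CONT sizes also set the contiguous hint bit so the
 * TLB may cache the whole set as a single entry.
 */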
pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
	size_t pagesize = 1UL << shift;

	entry = pte_mkhuge(entry);
	if (pagesize == CONT_PTE_SIZE) {
		entry = pte_mkcont(entry);
	} else if (pagesize == CONT_PMD_SIZE) {
		entry = pmd_pte(pmd_mkcont(pte_pmd(entry)));
	} else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) {
		pr_warn("%s: unrecognized huge page size 0x%lx\n",
			__func__, pagesize);
	}
	return entry;
}

void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, unsigned long sz)
{
	int i, ncontig;
	size_t pgsize;

	ncontig = num_contig_ptes(sz, &pgsize);

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
		pte_clear(mm, addr, ptep);
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pte_t *ptep)
{
	int ncontig;
	size_t pgsize;
	pte_t orig_pte = huge_ptep_get(ptep);

	if (!pte_cont(orig_pte))
		return ptep_get_and_clear(mm, addr, ptep);

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);

	return get_clear_flush(mm, addr, ptep, pgsize, ncontig);
}

/*
 * huge_ptep_set_access_flags will update access flags (dirty, accessed)
 * and write permission.
 *
 * For a contiguous huge pte range we need to check whether or not write
 * permission has to change only on the first pte in the set. Then for
 * all the contiguous ptes we need to check whether or not there is a
 * discrepancy between dirty or young.
 */
static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
{
	int i;

	if (pte_write(pte) != pte_write(huge_ptep_get(ptep)))
		return 1;

	for (i = 0; i < ncontig; i++) {
		pte_t orig_pte = huge_ptep_get(ptep + i);

		if (pte_dirty(pte) != pte_dirty(orig_pte))
			return 1;

		if (pte_young(pte) != pte_young(orig_pte))
			return 1;
	}

	return 0;
}

int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty)
{
	int ncontig, i;
	size_t pgsize = 0;
	unsigned long pfn = pte_pfn(pte), dpfn;
	pgprot_t hugeprot;
	pte_t orig_pte;

	if (!pte_cont(pte))
		return ptep_set_access_flags(vma, addr, ptep, pte, dirty);

	ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
	dpfn = pgsize >> PAGE_SHIFT;

	if (!__cont_access_flags_changed(ptep, pte, ncontig))
		return 0;

	orig_pte = get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);

	/* Make sure we don't lose the dirty or young state */
	if (pte_dirty(orig_pte))
		pte = pte_mkdirty(pte);

	if (pte_young(orig_pte))
		pte = pte_mkyoung(pte);

	hugeprot = pte_pgprot(pte);
	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot));

	return 1;
}

void huge_ptep_set_wrprotect(struct mm_struct *mm,
			     unsigned long addr, pte_t *ptep)
{
	unsigned long pfn, dpfn;
	pgprot_t hugeprot;
	int ncontig, i;
	size_t pgsize;
	pte_t pte;

	if (!pte_cont(READ_ONCE(*ptep))) {
		ptep_set_wrprotect(mm, addr, ptep);
		return;
	}

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	dpfn = pgsize >> PAGE_SHIFT;

	pte = get_clear_flush(mm, addr, ptep, pgsize, ncontig);
	pte = pte_wrprotect(pte);

	hugeprot = pte_pgprot(pte);
	pfn = pte_pfn(pte);

	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
}

void huge_ptep_clear_flush(struct vm_area_struct *vma,
			   unsigned long addr, pte_t *ptep)
{
	size_t pgsize;
	int ncontig;

	if (!pte_cont(READ_ONCE(*ptep))) {
		ptep_clear_flush(vma, addr, ptep);
		return;
	}

	ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
	clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
}

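/*
 * Register an hstate for each size supported with this kernel's page
 * size, so the generic hugetlb code exposes pools for all of them.
 */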
static int __init hugetlbpage_init(void)
{
	if (pud_sect_supported())
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);

	hugetlb_add_hstate(CONT_PMD_SHIFT - PAGE_SHIFT);
	hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	hugetlb_add_hstate(CONT_PTE_SHIFT - PAGE_SHIFT);

	return 0;
}
arch_initcall(hugetlbpage_init);

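/*
 * Called by the generic code to vet sizes passed via the hugepagesz=
 * command line parameter; anything outside the support matrix at the
 * top of this file is rejected.
 */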
bool __init arch_hugetlb_valid_size(unsigned long size)
{
	return __hugetlb_valid_size(size);
}