/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/mte.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>
#include <asm/tlbflush.h>

/*
 * VMALLOC range.
 *
 * VMALLOC_START: beginning of the kernel vmalloc space
 * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space
 *	and fixed mappings
 */
#define VMALLOC_START		(MODULES_END)
#define VMALLOC_END		(- PUD_SIZE - VMEMMAP_SIZE - SZ_64K)

#define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))

#define FIRST_USER_ADDRESS	0UL

#ifndef __ASSEMBLY__

#include <asm/cmpxchg.h>
#include <asm/fixmap.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/sched.h>

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE

/* Set stride and tlb_level in flush_*_tlb_range */
#define flush_pmd_tlb_range(vma, addr, end)	\
	__flush_tlb_range(vma, addr, end, PMD_SIZE, false, 2)
#define flush_pud_tlb_range(vma, addr, end)	\
	__flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Outside of a few very special situations (e.g. hibernation), we always
 * use broadcast TLB invalidation instructions, therefore a spurious page
 * fault on one CPU which has been handled concurrently by another CPU
 * does not need to perform additional invalidation.
 */
#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))

#define pte_ERROR(e)	\
	pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))

/*
 * Macros to convert between a physical address and its placement in a
 * page table entry, taking care of 52-bit addresses.
 */
#ifdef CONFIG_ARM64_PA_BITS_52
static inline phys_addr_t __pte_to_phys(pte_t pte)
{
	return (pte_val(pte) & PTE_ADDR_LOW) |
		((pte_val(pte) & PTE_ADDR_HIGH) << 36);
}
static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
{
	return (phys | (phys >> 36)) & PTE_ADDR_MASK;
}
#else
#define __pte_to_phys(pte)	(pte_val(pte) & PTE_ADDR_MASK)
#define __phys_to_pte_val(phys)	(phys)
#endif
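
/*
 * Worked example (illustrative only, not part of the original header),
 * assuming 64K pages with CONFIG_ARM64_PA_BITS_52, where PTE_ADDR_LOW
 * covers bits [47:16] and PTE_ADDR_HIGH covers bits [15:12]:
 *
 *	phys                    = 0x000f_0000_0001_0000
 *	__phys_to_pte_val(phys) = 0x0000_0000_0001_f000
 *		(PA bits [51:48] folded down into pte bits [15:12])
 *	__pte_to_phys(pte)      = 0x000f_0000_0001_0000
 *		(pte bits [15:12] shifted back up by 36 to PA bits [51:48])
 */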

#define pte_pfn(pte)		(__pte_to_phys(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	\
	__pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
#define pte_devmap(pte)		(!!(pte_val(pte) & PTE_DEVMAP))
#define pte_tagged(pte)		((pte_val(pte) & PTE_ATTRINDX_MASK) == \
				 PTE_ATTRINDX(MT_NORMAL_TAGGED))

#define pte_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pmd_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
/*
 * Execute-only user mappings do not have the PTE_USER bit set. All valid
 * kernel mappings have the PTE_UXN bit set.
 */
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 * Note that we can't make any assumptions based on the state of the access
 * flag, since ptep_clear_flush_young() elides a DSB when invalidating the
 * TLB.
 */
#define pte_accessible(mm, pte)	\
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))

/*
 * p??_access_permitted() is true for valid user mappings (PTE_USER
 * bit set, subject to the write permission check). Execute-only
 * mappings, like PROT_EXEC with EPAN (both PTE_USER and PTE_UXN bits
 * not set), must return false. PROT_NONE mappings do not have the
 * PTE_VALID bit set.
 */
#define pte_access_permitted(pte, write) \
	(((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) && (!(write) || pte_write(pte)))
#define pmd_access_permitted(pmd, write) \
	(pte_access_permitted(pmd_pte(pmd), (write)))
#define pud_access_permitted(pud, write) \
	(pte_access_permitted(pud_pte(pud), (write)))

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	pmd_val(pmd) &= ~pgprot_val(prot);
	return pmd;
}

static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	pmd_val(pmd) |= pgprot_val(prot);
	return pmd;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	if (pte_write(pte))
		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	/*
	 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
	 * clear), set the PTE_DIRTY bit.
	 */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);

	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_CONT));
	return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_mkpresent(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_VALID));
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);

	/*
	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
	 * or update_mmu_cache() have the necessary barriers.
	 */
	if (pte_valid_not_user(pte)) {
		dsb(ishst);
		isb();
	}
}

extern void __sync_icache_dcache(pte_t pteval);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 * Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *   0      0      |   1           0          0
 *   0      1      |   1           1          0
 *   1      0      |   1           0          1
 *   1      1      |   0           1          x
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */
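
/*
 * Illustrative sketch (an assumption for clarity, not part of the original
 * header): with hardware DBM, a clean but writable pte is created as
 *
 *	pte = pte_mkclean(pte_mkwrite(pte));	// PTE_WRITE=1, PTE_RDONLY=1
 *
 * and the MMU marks it dirty on the first write by clearing PTE_RDONLY in
 * the page table, with no software fault. The entry read back from memory
 * then satisfies pte_hw_dirty() and hence pte_dirty(), even though the
 * software PTE_DIRTY bit was never set.
 */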

static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep,
					   pte_t pte)
{
	pte_t old_pte;

	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	old_pte = READ_ONCE(*ptep);

	if (!pte_valid(old_pte) || !pte_valid(pte))
		return;
	if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1)
		return;

	/*
	 * Check for potential race with hardware updates of the pte
	 * (ptep_set_access_flags safely changes valid ptes without going
	 * through an invalid entry).
	 */
	VM_WARN_ONCE(!pte_young(pte),
		     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
	VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
		     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
		__sync_icache_dcache(pte);

	if (system_supports_mte() &&
	    pte_present(pte) && pte_tagged(pte) && !pte_special(pte))
		mte_sync_tags(ptep, pte);

	__check_racy_pte_update(mm, ptep, pte);

	set_pte(ptep, pte);
}

/*
 * Huge pte definitions.
 */
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		4
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

static inline pte_t pgd_pte(pgd_t pgd)
{
	return __pte(pgd_val(pgd));
}

static inline pte_t p4d_pte(p4d_t p4d)
{
	return __pte(p4d_val(p4d));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_pud_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PUD_TABLE_BIT) | PUD_TYPE_SECT);
}

static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PMD_TABLE_BIT) | PMD_TYPE_SECT);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/linux/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

#define pmd_present_invalid(pmd)     (!!(pmd_val(pmd) & PMD_PRESENT_INVALID))

static inline int pmd_present(pmd_t pmd)
{
	return pte_present(pmd_pte(pmd)) || pmd_present_invalid(pmd);
}

/*
 * THP definitions.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) && pmd_present(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_valid(pmd)		pte_valid(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	pmd = set_pmd_bit(pmd, __pgprot(PMD_PRESENT_INVALID));
	pmd = clear_pmd_bit(pmd, __pgprot(PMD_SECT_VALID));

	return pmd;
}

#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))

#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_devmap(pmd)		pte_devmap(pmd_pte(pmd))
#endif
static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
}

#define __pmd_to_phys(pmd)	__pte_to_phys(pmd_pte(pmd))
#define __phys_to_pmd_val(phys)	__phys_to_pte_val(phys)
#define pmd_pfn(pmd)		((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	__pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#define pud_young(pud)		pte_young(pud_pte(pud))
#define pud_mkyoung(pud)	pte_pud(pte_mkyoung(pud_pte(pud)))
#define pud_write(pud)		pte_write(pud_pte(pud))

#define pud_mkhuge(pud)		(__pud(pud_val(pud) & ~PUD_TABLE_BIT))

#define __pud_to_phys(pud)	__pte_to_phys(pud_pte(pud))
#define __phys_to_pud_val(phys)	__phys_to_pte_val(phys)
#define pud_pfn(pud)		((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
#define pfn_pud(pfn,prot)	__pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))
#define set_pud_at(mm, addr, pudp, pud)	set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud))

#define __p4d_to_phys(p4d)	__pte_to_phys(p4d_pte(p4d))
#define __phys_to_p4d_val(phys)	__phys_to_pte_val(phys)

#define __pgd_to_phys(pgd)	__pte_to_phys(pgd_pte(pgd))
#define __phys_to_pgd_val(phys)	__phys_to_pte_val(phys)

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_nx(prot) \
	__pgprot_modify(prot, PTE_MAYBE_GP, PTE_PXN)

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
#define pgprot_tagged(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_TAGGED))
#define pgprot_mhp	pgprot_tagged
/*
 * DMA allocations for non-coherent devices use what the Arm architecture calls
 * "Normal non-cacheable" memory, which permits speculation, unaligned accesses
 * and merging of writes.  This is different from "Device-nGnR[nE]" memory which
 * is intended for MMIO and thus forbids speculation, preserves access size,
 * requires strict alignment and can also force write responses to come from the
 * endpoint.
 */
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, \
			PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
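
/*
 * Usage sketch (illustrative, not from the original source): a driver
 * mapping a write-combining framebuffer into userspace might do
 *
 *	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *
 * which only switches the memory type to Normal-NC and forces PXN/UXN,
 * leaving the remaining attributes of the original pgprot untouched.
 */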

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))

#define pmd_bad(pmd)		(!(pmd_val(pmd) & PMD_TABLE_BIT))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)
#define pmd_leaf(pmd)		pmd_sect(pmd)

#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
static inline bool pud_sect(pud_t pud) { return false; }
static inline bool pud_table(pud_t pud) { return true; }
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif

extern pgd_t init_pg_dir[PTRS_PER_PGD];
extern pgd_t init_pg_end[];
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_end[];
extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
extern pgd_t reserved_pg_dir[PTRS_PER_PGD];

extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);

static inline bool in_swapper_pgdir(void *addr)
{
	return ((unsigned long)addr & PAGE_MASK) ==
	        ((unsigned long)swapper_pg_dir & PAGE_MASK);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
#ifdef __PAGETABLE_PMD_FOLDED
	if (in_swapper_pgdir(pmdp)) {
		set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd)));
		return;
	}
#endif /* __PAGETABLE_PMD_FOLDED */

	WRITE_ONCE(*pmdp, pmd);

	if (pmd_valid(pmd)) {
		dsb(ishst);
		isb();
	}
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
	return __pmd_to_phys(pmd);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_page_paddr(pmd));
}

/* Find an entry in the third-level page table. */
#define pte_offset_phys(dir,addr)	(pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))

#define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap()		clear_fixmap(FIX_PTE)

#define pmd_page(pmd)			phys_to_page(__pmd_to_phys(pmd))

/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(e)	\
	pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!(pud_val(pud) & PUD_TABLE_BIT))
#define pud_present(pud)	pte_present(pud_pte(pud))
#define pud_leaf(pud)		pud_sect(pud)
#define pud_valid(pud)		pte_valid(pud_pte(pud))

static inline void set_pud(pud_t *pudp, pud_t pud)
{
#ifdef __PAGETABLE_PUD_FOLDED
	if (in_swapper_pgdir(pudp)) {
		set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud)));
		return;
	}
#endif /* __PAGETABLE_PUD_FOLDED */

	WRITE_ONCE(*pudp, pud);

	if (pud_valid(pud)) {
		dsb(ishst);
		isb();
	}
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline phys_addr_t pud_page_paddr(pud_t pud)
{
	return __pud_to_phys(pud);
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)__va(pud_page_paddr(pud));
}

/* Find an entry in the second-level page table. */
#define pmd_offset_phys(dir, addr)	(pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))

#define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)

#define pud_page(pud)			phys_to_page(__pud_to_phys(pud))

/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))

#else

#define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })

/* Match pmd_offset folding in <asm/generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr)		NULL
#define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
#define pmd_clear_fixmap()

#define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)

#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

#define pud_ERROR(e)	\
	pr_err("%s:%d: bad pud %016llx.\n", __FILE__, __LINE__, pud_val(e))

#define p4d_none(p4d)		(!p4d_val(p4d))
#define p4d_bad(p4d)		(!(p4d_val(p4d) & 2))
#define p4d_present(p4d)	(p4d_val(p4d))

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	if (in_swapper_pgdir(p4dp)) {
		set_swapper_pgd((pgd_t *)p4dp, __pgd(p4d_val(p4d)));
		return;
	}

	WRITE_ONCE(*p4dp, p4d);
	dsb(ishst);
	isb();
}

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, __p4d(0));
}

static inline phys_addr_t p4d_page_paddr(p4d_t p4d)
{
	return __p4d_to_phys(p4d);
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)__va(p4d_page_paddr(p4d));
}

/* Find an entry in the first-level page table. */
#define pud_offset_phys(dir, addr)	(p4d_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))

#define pud_set_fixmap(addr)		((pud_t *)set_fixmap_offset(FIX_PUD, addr))
#define pud_set_fixmap_offset(p4d, addr)	pud_set_fixmap(pud_offset_phys(p4d, addr))
#define pud_clear_fixmap()		clear_fixmap(FIX_PUD)

#define p4d_page(p4d)		pfn_to_page(__phys_to_pfn(__p4d_to_phys(p4d)))

/* use ONLY for statically allocated translation tables */
#define pud_offset_kimg(dir,addr)	((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))

#else

#define p4d_page_paddr(p4d)	({ BUILD_BUG(); 0;})
#define pgd_page_paddr(pgd)	({ BUILD_BUG(); 0;})

/* Match pud_offset folding in <asm/generic/pgtable-nopud.h> */
#define pud_set_fixmap(addr)		NULL
#define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
#define pud_clear_fixmap()

#define pud_offset_kimg(dir,addr)	((pud_t *)dir)

#endif  /* CONFIG_PGTABLE_LEVELS > 3 */

#define pgd_ERROR(e)	\
	pr_err("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e))

#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	/*
	 * Normal and Normal-Tagged are two different memory types and indices
	 * in MAIR_EL1. The mask below has to include PTE_ATTRINDX_MASK.
	 */
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE | PTE_GP |
			      PTE_ATTRINDX_MASK;
	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}

static inline int pud_devmap(pud_t pud)
{
	return 0;
}

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif

/*
 * Atomic pte/pmd modifications.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		pte = pte_mkold(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));

	return pte_young(pte);
}

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	return __ptep_test_and_clear_young(ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	int young = ptep_test_and_clear_young(vma, address, ptep);

	if (young) {
		/*
		 * We can elide the trailing DSB here since the worst that can
		 * happen is that a CPU continues to use the young entry in its
		 * TLB and we mistakenly reclaim the associated page. The
		 * window for such an event is bounded by the next
		 * context-switch, which provides a DSB to complete the TLB
		 * invalidation.
		 */
		flush_tlb_page_nosync(vma, address);
	}

	return young;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	return __pte(xchg_relaxed(&pte_val(*ptep), 0));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * ptep_set_wrprotect - mark read-only while transferring potential hardware
 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
 */
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		pte = pte_wrprotect(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
}
#endif

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bits 2-7:	swap type
 *	bits 8-57:	swap offset
 *	bit  58:	PTE_PROT_NONE (must be zero)
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		6
#define __SWP_OFFSET_BITS	50
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
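
/*
 * Worked example (illustrative, not part of the original header):
 *
 *	__swp_entry(3, 0x1234).val = (3 << 2) | (0x1234 << 8) = 0x12340c
 *
 * Bits 0-1 and bit 58 stay zero, so the resulting pte is neither valid nor
 * PTE_PROT_NONE, and __swp_type()/__swp_offset() recover 3 and 0x1234.
 */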

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
#define __pmd_to_swp_entry(pmd)		((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(swp)		__pmd((swp).val)
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

extern int kern_addr_valid(unsigned long addr);

#ifdef CONFIG_ARM64_MTE

#define __HAVE_ARCH_PREPARE_TO_SWAP
static inline int arch_prepare_to_swap(struct page *page)
{
	if (system_supports_mte())
		return mte_save_tags(page);
	return 0;
}

#define __HAVE_ARCH_SWAP_INVALIDATE
static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
{
	if (system_supports_mte())
		mte_invalidate_tags(type, offset);
}

static inline void arch_swap_invalidate_area(int type)
{
	if (system_supports_mte())
		mte_invalidate_tags_area(type);
}

#define __HAVE_ARCH_SWAP_RESTORE
static inline void arch_swap_restore(swp_entry_t entry, struct page *page)
{
	if (system_supports_mte() && mte_restore_tags(entry, page))
		set_bit(PG_mte_tagged, &page->flags);
}

#endif /* CONFIG_ARM64_MTE */

/*
 * On AArch64, the cache coherency is handled via the set_pte_at() function.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
	/*
	 * We don't do anything here, so there's a very small chance of
	 * us retaking a user fault which we just fixed up. The alternative
	 * is doing a dsb(ishst), but that penalises the fastpath.
	 */
}

#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#ifdef CONFIG_ARM64_PA_BITS_52
#define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
#else
#define phys_to_ttbr(addr)	(addr)
#endif
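
/*
 * Worked example (illustrative, CONFIG_ARM64_PA_BITS_52 assumed): TTBR_ELx
 * holds bits [51:48] of the table address in BADDR bits [5:2], so
 *
 *	phys_to_ttbr(0x000f_0000_0001_0000) == 0x0000_0000_0001_003c
 *
 * i.e. (addr >> 46) moves PA bits [51:48] down to bits [5:2] before the
 * result is masked with TTBR_BADDR_MASK_52.
 */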

/*
 * On arm64 without hardware Access Flag, copying from user will fail because
 * the pte is old and cannot be marked young. So we always end up with zeroed
 * page after fork() + CoW for pfn mappings. We don't always have a
 * hardware-managed access flag on arm64.
 */
static inline bool arch_faults_on_old_pte(void)
{
	WARN_ON(preemptible());

	return !cpu_has_hw_af();
}
#define arch_faults_on_old_pte arch_faults_on_old_pte

static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
{
	if (cpus_have_const_cap(ARM64_HAS_EPAN))
		return prot;

	if (pgprot_val(prot) != pgprot_val(PAGE_EXECONLY))
		return prot;

	return PAGE_READONLY_EXEC;
}


#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */