/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/mte.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>
#include <asm/tlbflush.h>

/*
 * VMALLOC range.
 *
 * VMALLOC_START: beginning of the kernel vmalloc space
 * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space
 *	and fixed mappings
 */
#define VMALLOC_START		(MODULES_END)
#define VMALLOC_END		(VMEMMAP_START - SZ_256M)

#define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))

#define FIRST_USER_ADDRESS	0UL

#ifndef __ASSEMBLY__

#include <asm/cmpxchg.h>
#include <asm/fixmap.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/sched.h>

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE

/* Set stride and tlb_level in flush_*_tlb_range */
#define flush_pmd_tlb_range(vma, addr, end)	\
	__flush_tlb_range(vma, addr, end, PMD_SIZE, false, 2)
#define flush_pud_tlb_range(vma, addr, end)	\
	__flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Outside of a few very special situations (e.g. hibernation), we always
 * use broadcast TLB invalidation instructions, therefore a spurious page
 * fault on one CPU which has been handled concurrently by another CPU
 * does not need to perform additional invalidation.
 */
#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))

#define pte_ERROR(e)	\
	pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))

/*
 * Macros to convert between a physical address and its placement in a
 * page table entry, taking care of 52-bit addresses.
 */
#ifdef CONFIG_ARM64_PA_BITS_52
#define __pte_to_phys(pte)	\
	((pte_val(pte) & PTE_ADDR_LOW) | ((pte_val(pte) & PTE_ADDR_HIGH) << 36))
#define __phys_to_pte_val(phys)	(((phys) | ((phys) >> 36)) & PTE_ADDR_MASK)
#else
#define __pte_to_phys(pte)	(pte_val(pte) & PTE_ADDR_MASK)
#define __phys_to_pte_val(phys)	(phys)
#endif
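
/*
 * Illustrative sketch (a hypothetical helper, not part of the kernel API):
 * for a page-aligned physical address within the supported PA range, the
 * conversion above must round-trip losslessly.
 */
static inline bool __pte_phys_roundtrips(phys_addr_t phys)
{
	return __pte_to_phys(__pte(__phys_to_pte_val(phys))) == phys;
}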
#define pte_pfn(pte)		(__pte_to_phys(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	\
	__pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
#define pte_devmap(pte)		(!!(pte_val(pte) & PTE_DEVMAP))
#define pte_tagged(pte)		((pte_val(pte) & PTE_ATTRINDX_MASK) == \
				 PTE_ATTRINDX(MT_NORMAL_TAGGED))
#define pte_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pmd_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
/*
 * Execute-only user mappings do not have the PTE_USER bit set. All valid
 * kernel mappings have the PTE_UXN bit set.
 */
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 * Note that we can't make any assumptions based on the state of the access
 * flag, since ptep_clear_flush_young() elides a DSB when invalidating the
 * TLB.
 */
#define pte_accessible(mm, pte)	\
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))

/*
 * p??_access_permitted() is true for valid user mappings (PTE_USER
 * bit set, subject to the write permission check). For execute-only
 * mappings, like PROT_EXEC with EPAN (both PTE_USER and PTE_UXN bits
 * not set) must return false. PROT_NONE mappings do not have the
 * PTE_VALID bit set.
 */
#define pte_access_permitted(pte, write) \
	(((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) && (!(write) || pte_write(pte)))
#define pmd_access_permitted(pmd, write) \
	(pte_access_permitted(pmd_pte(pmd), (write)))
#define pud_access_permitted(pud, write) \
	(pte_access_permitted(pud_pte(pud), (write)))
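
/*
 * Illustrative sketch (a hypothetical helper, not part of the kernel API):
 * an execute-only pte has PTE_VALID set but PTE_USER clear, so
 * pte_access_permitted() must reject it even for read access.
 */
static inline bool __execonly_pte_not_permitted(void)
{
	pte_t pte = __pte(PTE_VALID);	/* PTE_USER and PTE_UXN both clear */

	return !pte_access_permitted(pte, false);
}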

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	pmd_val(pmd) &= ~pgprot_val(prot);
	return pmd;
}

static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	pmd_val(pmd) |= pgprot_val(prot);
	return pmd;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	if (pte_write(pte))
		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	/*
	 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
	 * clear), set the PTE_DIRTY bit.
	 */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);

	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}
static inline pte_t pte_mkcont(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_CONT));
	return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_mkpresent(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_VALID));
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);

	/*
	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
	 * or update_mmu_cache() have the necessary barriers.
	 */
	if (pte_valid_not_user(pte)) {
		dsb(ishst);
		isb();
	}
}

extern void __sync_icache_dcache(pte_t pteval);
/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 * Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *   0      0      |   1           0          0
 *   0      1      |   1           1          0
 *   1      0      |   1           0          1
 *   1      1      |   0           1          x
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */
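
/*
 * Illustrative sketch (hypothetical, documentation only): per the table
 * above, wrprotecting a hardware-dirty pte must migrate the dirty state
 * into the software PTE_DIRTY bit, so pte_dirty() is preserved.
 */
static inline bool __wrprotect_preserves_dirty(pte_t pte)
{
	return !pte_dirty(pte) || pte_dirty(pte_wrprotect(pte));
}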

static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep,
					   pte_t pte)
{
	pte_t old_pte;

	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	old_pte = READ_ONCE(*ptep);

	if (!pte_valid(old_pte) || !pte_valid(pte))
		return;
	if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1)
		return;

	/*
	 * Check for potential race with hardware updates of the pte
	 * (ptep_set_access_flags safely changes valid ptes without going
	 * through an invalid entry).
	 */
	VM_WARN_ONCE(!pte_young(pte),
		     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
	VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
		     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
		__sync_icache_dcache(pte);

	if (system_supports_mte() &&
	    pte_present(pte) && pte_tagged(pte) && !pte_special(pte))
		mte_sync_tags(ptep, pte);

	__check_racy_pte_update(mm, ptep, pte);

	set_pte(ptep, pte);
}

/*
 * Huge pte definitions.
 */
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		4
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
static inline pte_t pgd_pte(pgd_t pgd)
{
	return __pte(pgd_val(pgd));
}

static inline pte_t p4d_pte(p4d_t p4d)
{
	return __pte(p4d_val(p4d));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}
static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}
static inline pgprot_t mk_pud_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PUD_TABLE_BIT) | PUD_TYPE_SECT);
}

static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PMD_TABLE_BIT) | PMD_TYPE_SECT);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/linux/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

#define pmd_present_invalid(pmd)     (!!(pmd_val(pmd) & PMD_PRESENT_INVALID))

static inline int pmd_present(pmd_t pmd)
{
	return pte_present(pmd_pte(pmd)) || pmd_present_invalid(pmd);
}

/*
 * THP definitions.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) && pmd_present(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_valid(pmd)		pte_valid(pmd_pte(pmd))
#define pmd_cont(pmd)		pte_cont(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	pmd = set_pmd_bit(pmd, __pgprot(PMD_PRESENT_INVALID));
	pmd = clear_pmd_bit(pmd, __pgprot(PMD_SECT_VALID));

	return pmd;
}
#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))

#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_devmap(pmd)		pte_devmap(pmd_pte(pmd))
#endif
static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
}
#define __pmd_to_phys(pmd)	__pte_to_phys(pmd_pte(pmd))
#define __phys_to_pmd_val(phys)	__phys_to_pte_val(phys)
#define pmd_pfn(pmd)		((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	__pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#define pud_young(pud)		pte_young(pud_pte(pud))
#define pud_mkyoung(pud)	pte_pud(pte_mkyoung(pud_pte(pud)))
#define pud_write(pud)		pte_write(pud_pte(pud))
#define pud_mkhuge(pud)		(__pud(pud_val(pud) & ~PUD_TABLE_BIT))

#define __pud_to_phys(pud)	__pte_to_phys(pud_pte(pud))
#define __phys_to_pud_val(phys)	__phys_to_pte_val(phys)
#define pud_pfn(pud)		((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
#define pfn_pud(pfn,prot)	__pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))
#define set_pud_at(mm, addr, pudp, pud)	set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud))
#define __p4d_to_phys(p4d)	__pte_to_phys(p4d_pte(p4d))
#define __phys_to_p4d_val(phys)	__phys_to_pte_val(phys)

#define __pgd_to_phys(pgd)	__pte_to_phys(pgd_pte(pgd))
#define __phys_to_pgd_val(phys)	__phys_to_pte_val(phys)

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_nx(prot) \
	__pgprot_modify(prot, PTE_MAYBE_GP, PTE_PXN)

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
#define pgprot_tagged(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_TAGGED))
#define pgprot_mhp	pgprot_tagged
/*
 * DMA allocations for non-coherent devices use what the Arm architecture calls
 * "Normal non-cacheable" memory, which permits speculation, unaligned accesses
 * and merging of writes.  This is different from "Device-nGnR[nE]" memory which
 * is intended for MMIO and thus forbids speculation, preserves access size,
 * requires strict alignment and can also force write responses to come from the
 * endpoint.
 */
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, \
			PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
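
/*
 * Illustrative sketch (hypothetical usage, not from this file): a driver
 * would derive the protection bits for a non-coherent DMA buffer mapping
 * from PAGE_KERNEL like so.
 */
static inline pgprot_t __example_dma_prot(void)
{
	/* Normal non-cacheable, never executable */
	return pgprot_dmacoherent(PAGE_KERNEL);
}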

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)
#define pmd_leaf(pmd)		pmd_sect(pmd)
#define pmd_bad(pmd)		(!pmd_table(pmd))

#define pmd_leaf_size(pmd)	(pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE)
#define pte_leaf_size(pte)	(pte_cont(pte) ? CONT_PTE_SIZE : PAGE_SIZE)

#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
static inline bool pud_sect(pud_t pud) { return false; }
static inline bool pud_table(pud_t pud) { return true; }
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif

extern pgd_t init_pg_dir[PTRS_PER_PGD];
extern pgd_t init_pg_end[];
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_end[];
extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
extern pgd_t reserved_pg_dir[PTRS_PER_PGD];

extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);

static inline bool in_swapper_pgdir(void *addr)
{
	return ((unsigned long)addr & PAGE_MASK) ==
	        ((unsigned long)swapper_pg_dir & PAGE_MASK);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
#ifdef __PAGETABLE_PMD_FOLDED
	if (in_swapper_pgdir(pmdp)) {
		set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd)));
		return;
	}
#endif /* __PAGETABLE_PMD_FOLDED */
	WRITE_ONCE(*pmdp, pmd);
	if (pmd_valid(pmd)) {
		dsb(ishst);
		isb();
	}
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
	return __pmd_to_phys(pmd);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_page_paddr(pmd));
}
/* Find an entry in the third-level page table. */
#define pte_offset_phys(dir,addr)	(pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))
#define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap()		clear_fixmap(FIX_PTE)

#define pmd_page(pmd)			phys_to_page(__pmd_to_phys(pmd))
/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

#if CONFIG_PGTABLE_LEVELS > 2
#define pmd_ERROR(e)	\
	pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!pud_table(pud))
#define pud_present(pud)	pte_present(pud_pte(pud))
#define pud_leaf(pud)		pud_sect(pud)
#define pud_valid(pud)		pte_valid(pud_pte(pud))

static inline void set_pud(pud_t *pudp, pud_t pud)
{
#ifdef __PAGETABLE_PUD_FOLDED
	if (in_swapper_pgdir(pudp)) {
		set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud)));
		return;
	}
#endif /* __PAGETABLE_PUD_FOLDED */

	WRITE_ONCE(*pudp, pud);

	if (pud_valid(pud)) {
		dsb(ishst);
		isb();
	}
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline phys_addr_t pud_page_paddr(pud_t pud)
{
	return __pud_to_phys(pud);
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va(pud_page_paddr(pud));
}
/* Find an entry in the second-level page table. */
#define pmd_offset_phys(dir, addr)	(pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))
#define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)
#define pud_page(pud)			phys_to_page(__pud_to_phys(pud))
/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))

#else

#define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })

/* Match pmd_offset folding in <asm/generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr)		NULL
#define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
#define pmd_clear_fixmap()

#define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)

#endif	/* CONFIG_PGTABLE_LEVELS > 2 */
#if CONFIG_PGTABLE_LEVELS > 3
#define pud_ERROR(e)	\
	pr_err("%s:%d: bad pud %016llx.\n", __FILE__, __LINE__, pud_val(e))
#define p4d_none(p4d)		(!p4d_val(p4d))
#define p4d_bad(p4d)		(!(p4d_val(p4d) & 2))
#define p4d_present(p4d)	(p4d_val(p4d))
static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	if (in_swapper_pgdir(p4dp)) {
		set_swapper_pgd((pgd_t *)p4dp, __pgd(p4d_val(p4d)));
		return;
	}

	WRITE_ONCE(*p4dp, p4d);
	dsb(ishst);
	isb();
}

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, __p4d(0));
}

static inline phys_addr_t p4d_page_paddr(p4d_t p4d)
{
	return __p4d_to_phys(p4d);
}

static inline unsigned long p4d_page_vaddr(p4d_t p4d)
{
	return (unsigned long)__va(p4d_page_paddr(p4d));
}
/* Find an entry in the first-level page table. */
#define pud_offset_phys(dir, addr)	(p4d_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))
#define pud_set_fixmap(addr)		((pud_t *)set_fixmap_offset(FIX_PUD, addr))
#define pud_set_fixmap_offset(p4d, addr)	pud_set_fixmap(pud_offset_phys(p4d, addr))
#define pud_clear_fixmap()		clear_fixmap(FIX_PUD)
#define p4d_page(p4d)		pfn_to_page(__phys_to_pfn(__p4d_to_phys(p4d)))
/* use ONLY for statically allocated translation tables */
#define pud_offset_kimg(dir,addr)	((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))

#else

#define p4d_page_paddr(p4d)	({ BUILD_BUG(); 0;})
#define pgd_page_paddr(pgd)	({ BUILD_BUG(); 0;})

/* Match pud_offset folding in <asm/generic/pgtable-nopud.h> */
#define pud_set_fixmap(addr)		NULL
#define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
#define pud_clear_fixmap()

#define pud_offset_kimg(dir,addr)	((pud_t *)dir)

#endif  /* CONFIG_PGTABLE_LEVELS > 3 */

#define pgd_ERROR(e)	\
	pr_err("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e))
#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	/*
	 * Normal and Normal-Tagged are two different memory types and indices
	 * in MAIR_EL1. The mask below has to include PTE_ATTRINDX_MASK.
	 */
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE | PTE_GP |
			      PTE_ATTRINDX_MASK;
	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}

static inline int pud_devmap(pud_t pud)
{
	return 0;
}

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif

/*
 * Atomic pte/pmd modifications.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		pte = pte_mkold(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));

	return pte_young(pte);
}

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	return __ptep_test_and_clear_young(ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	int young = ptep_test_and_clear_young(vma, address, ptep);

	if (young) {
		/*
		 * We can elide the trailing DSB here since the worst that can
		 * happen is that a CPU continues to use the young entry in its
		 * TLB and we mistakenly reclaim the associated page. The
		 * window for such an event is bounded by the next
		 * context-switch, which provides a DSB to complete the TLB
		 * invalidation.
		 */
		flush_tlb_page_nosync(vma, address);
	}

	return young;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	return __pte(xchg_relaxed(&pte_val(*ptep), 0));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * ptep_set_wrprotect - mark read-only while transferring potential hardware
 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
 */
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		pte = pte_wrprotect(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
}
#endif

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bits 2-7:	swap type
 *	bits 8-57:	swap offset
 *	bit  58:	PTE_PROT_NONE (must be zero)
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		6
#define __SWP_OFFSET_BITS	50
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
#define __pmd_to_swp_entry(pmd)		((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(swp)		__pmd((swp).val)
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
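
/*
 * Illustrative sketch (a hypothetical helper, not part of the kernel API):
 * a (type, offset) pair within the field widths above must survive a round
 * trip through the swap pte encoding.
 */
static inline bool __swp_encoding_roundtrips(unsigned long type,
					     unsigned long offset)
{
	swp_entry_t entry = __swp_entry(type, offset);

	return __swp_type(entry) == type && __swp_offset(entry) == offset;
}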

/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

extern int kern_addr_valid(unsigned long addr);

#ifdef CONFIG_ARM64_MTE

#define __HAVE_ARCH_PREPARE_TO_SWAP
static inline int arch_prepare_to_swap(struct page *page)
{
	if (system_supports_mte())
		return mte_save_tags(page);
	return 0;
}

#define __HAVE_ARCH_SWAP_INVALIDATE
static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
{
	if (system_supports_mte())
		mte_invalidate_tags(type, offset);
}

static inline void arch_swap_invalidate_area(int type)
{
	if (system_supports_mte())
		mte_invalidate_tags_area(type);
}

#define __HAVE_ARCH_SWAP_RESTORE
static inline void arch_swap_restore(swp_entry_t entry, struct page *page)
{
	if (system_supports_mte() && mte_restore_tags(entry, page))
		set_bit(PG_mte_tagged, &page->flags);
}

#endif /* CONFIG_ARM64_MTE */

/*
 * On AArch64, the cache coherency is handled via the set_pte_at() function.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
	/*
	 * We don't do anything here, so there's a very small chance of
	 * us retaking a user fault which we just fixed up. The alternative
	 * is doing a dsb(ishst), but that penalises the fastpath.
	 */
}

#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#ifdef CONFIG_ARM64_PA_BITS_52
#define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
#else
#define phys_to_ttbr(addr)	(addr)
#endif

/*
 * On arm64 without hardware Access Flag, copying from user will fail because
 * the pte is old and cannot be marked young. So we always end up with zeroed
 * page after fork() + CoW for pfn mappings. We don't always have a
 * hardware-managed access flag on arm64.
 */
static inline bool arch_faults_on_old_pte(void)
{
	WARN_ON(preemptible());

	return !cpu_has_hw_af();
}
#define arch_faults_on_old_pte		arch_faults_on_old_pte

/*
 * Experimentally, it's cheap to set the access flag in hardware and we
 * benefit from prefaulting mappings as 'old' to start with.
 */
static inline bool arch_wants_old_prefaulted_pte(void)
{
	return !arch_faults_on_old_pte();
}
#define arch_wants_old_prefaulted_pte	arch_wants_old_prefaulted_pte
static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
{
	if (cpus_have_const_cap(ARM64_HAS_EPAN))
		return prot;

	if (pgprot_val(prot) != pgprot_val(PAGE_EXECONLY))
		return prot;

	return PAGE_READONLY_EXEC;
}

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */