/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGTABLE_H
#define _ASM_RISCV_PGTABLE_H

#include <linux/mmzone.h>
#include <linux/sizes.h>

#include <asm/pgtable-bits.h>

#ifndef CONFIG_MMU
#define KERNEL_LINK_ADDR	PAGE_OFFSET
#define KERN_VIRT_SIZE		(UL(-1))
#else

#define ADDRESS_SPACE_END	(UL(-1))

#ifdef CONFIG_64BIT
/* Leave 2GB for kernel and BPF at the end of the address space */
#define KERNEL_LINK_ADDR	(ADDRESS_SPACE_END - SZ_2G + 1)
#else
#define KERNEL_LINK_ADDR	PAGE_OFFSET
#endif
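
/*
 * For example, on 64-bit ADDRESS_SPACE_END is 0xffffffffffffffff, so
 * KERNEL_LINK_ADDR = 0xffffffffffffffff - 0x80000000 + 1 = 0xffffffff80000000,
 * i.e. the kernel image is linked into the top 2GB of the address space.
 */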

/* Number of entries in the page global directory */
#define PTRS_PER_PGD    (PAGE_SIZE / sizeof(pgd_t))
/* Number of entries in the page table */
#define PTRS_PER_PTE    (PAGE_SIZE / sizeof(pte_t))

/*
 * Half of the kernel address space (half of the entries of the page global
 * directory) is for the direct mapping.
 */
#define KERN_VIRT_SIZE          ((PTRS_PER_PGD / 2 * PGDIR_SIZE) / 2)

#define VMALLOC_SIZE     (KERN_VIRT_SIZE >> 1)
#define VMALLOC_END      PAGE_OFFSET
#define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)
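
/*
 * For example, under Sv39 with 4K pages (PTRS_PER_PGD = 512, PGDIR_SIZE = 1GB)
 * this gives KERN_VIRT_SIZE = 128GB and VMALLOC_SIZE = 64GB, with the vmalloc
 * area ending immediately below PAGE_OFFSET.
 */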

#define BPF_JIT_REGION_SIZE	(SZ_128M)
#ifdef CONFIG_64BIT
#define BPF_JIT_REGION_START	(BPF_JIT_REGION_END - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END	(MODULES_END)
#else
#define BPF_JIT_REGION_START	(PAGE_OFFSET - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END	(VMALLOC_END)
#endif
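
/*
 * On 64-bit the JIT region sits right below the kernel image, which keeps
 * JITed BPF code within +-2GB of the kernel text, i.e. within the reach of
 * an auipc/jalr call sequence into kernel helpers.
 */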

/* Modules always live before the kernel */
#ifdef CONFIG_64BIT
/* This is used to define the end of the KASAN shadow region */
#define MODULES_LOWEST_VADDR	(KERNEL_LINK_ADDR - SZ_2G)
#define MODULES_VADDR		(PFN_ALIGN((unsigned long)&_end) - SZ_2G)
#define MODULES_END		(PFN_ALIGN((unsigned long)&_start))
#endif

/*
 * Roughly size the vmemmap space to be large enough to fit enough
 * struct pages to map half the virtual address space. Then
 * position vmemmap directly below the VMALLOC region.
 */
#ifdef CONFIG_64BIT
#define VA_BITS		(pgtable_l5_enabled ? \
				57 : (pgtable_l4_enabled ? 48 : 39))
#else
#define VA_BITS		32
#endif

#define VMEMMAP_SHIFT \
	(VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
#define VMEMMAP_SIZE	BIT(VMEMMAP_SHIFT)
#define VMEMMAP_END	VMALLOC_START
#define VMEMMAP_START	(VMALLOC_START - VMEMMAP_SIZE)
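
/*
 * For example, under Sv39 (VA_BITS = 39, PAGE_SHIFT = 12) and assuming a
 * 64-byte struct page (STRUCT_PAGE_MAX_SHIFT = 6), VMEMMAP_SHIFT is
 * 39 - 12 - 1 + 6 = 32, i.e. a 4GB vmemmap window sitting directly below
 * the vmalloc area.
 */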

/*
 * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
 * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
 */
#define vmemmap		((struct page *)VMEMMAP_START)

#define PCI_IO_SIZE      SZ_16M
#define PCI_IO_END       VMEMMAP_START
#define PCI_IO_START     (PCI_IO_END - PCI_IO_SIZE)

#define FIXADDR_TOP      PCI_IO_START
#ifdef CONFIG_64BIT
#define FIXADDR_SIZE     PMD_SIZE
#else
#define FIXADDR_SIZE     PGDIR_SIZE
#endif
#define FIXADDR_START    (FIXADDR_TOP - FIXADDR_SIZE)
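
/*
 * With 4K pages the fixmap thus occupies one PMD (2MB) on 64-bit, or one
 * Sv32 PGD (4MB) on 32-bit, directly below the PCI I/O window.
 */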

#endif

#ifdef CONFIG_XIP_KERNEL
#define XIP_OFFSET		SZ_32M
#define XIP_OFFSET_MASK		(SZ_32M - 1)
#else
#define XIP_OFFSET		0
#endif

#ifndef __ASSEMBLY__

#include <asm/page.h>
#include <asm/tlbflush.h>
#include <linux/mm_types.h>

#define __page_val_to_pfn(_val)  (((_val) & _PAGE_PFN_MASK) >> _PAGE_PFN_SHIFT)

#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#else
#include <asm/pgtable-32.h>
#endif /* CONFIG_64BIT */

#include <linux/page_table_check.h>

#ifdef CONFIG_XIP_KERNEL
#define XIP_FIXUP(addr) ({							\
	uintptr_t __a = (uintptr_t)(addr);					\
	(__a >= CONFIG_XIP_PHYS_ADDR && \
	 __a < CONFIG_XIP_PHYS_ADDR + XIP_OFFSET * 2) ?	\
		__a - CONFIG_XIP_PHYS_ADDR + CONFIG_PHYS_RAM_BASE - XIP_OFFSET :\
		__a;								\
	})
#else
#define XIP_FIXUP(addr)		(addr)
#endif /* CONFIG_XIP_KERNEL */
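
/*
 * XIP_FIXUP() maps a link-time address inside the XIP image window
 * (CONFIG_XIP_PHYS_ADDR .. CONFIG_XIP_PHYS_ADDR + 2 * XIP_OFFSET) onto the
 * corresponding address of the copy placed in RAM at
 * CONFIG_PHYS_RAM_BASE - XIP_OFFSET, and leaves any other address untouched,
 * so early boot code can reach writable data that was copied out of flash.
 */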

struct pt_alloc_ops {
	pte_t *(*get_pte_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pte)(uintptr_t va);
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t *(*get_pmd_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pmd)(uintptr_t va);
	pud_t *(*get_pud_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pud)(uintptr_t va);
	p4d_t *(*get_p4d_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_p4d)(uintptr_t va);
#endif
};

extern struct pt_alloc_ops pt_ops __initdata;

#ifdef CONFIG_MMU
/* Number of PGD entries that a user-mode program can use */
#define USER_PTRS_PER_PGD   (TASK_SIZE / PGDIR_SIZE)

/* Page protection bits */
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)

#define PAGE_NONE		__pgprot(_PAGE_PROT_NONE | _PAGE_READ)
#define PAGE_READ		__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_WRITE		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
#define PAGE_EXEC		__pgprot(_PAGE_BASE | _PAGE_EXEC)
#define PAGE_READ_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_WRITE_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ |	\
					 _PAGE_EXEC | _PAGE_WRITE)

#define PAGE_COPY		PAGE_READ
#define PAGE_COPY_EXEC		PAGE_EXEC
#define PAGE_COPY_READ_EXEC	PAGE_READ_EXEC
#define PAGE_SHARED		PAGE_WRITE
#define PAGE_SHARED_EXEC	PAGE_WRITE_EXEC

#define _PAGE_KERNEL		(_PAGE_READ \
				| _PAGE_WRITE \
				| _PAGE_PRESENT \
				| _PAGE_ACCESSED \
				| _PAGE_DIRTY \
				| _PAGE_GLOBAL)

#define PAGE_KERNEL		__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_READ	__pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL | _PAGE_EXEC)
#define PAGE_KERNEL_READ_EXEC	__pgprot((_PAGE_KERNEL & ~_PAGE_WRITE) \
					 | _PAGE_EXEC)

#define PAGE_TABLE		__pgprot(_PAGE_TABLE)

#define _PAGE_IOREMAP	((_PAGE_KERNEL & ~_PAGE_MTMASK) | _PAGE_IO)
#define PAGE_KERNEL_IO		__pgprot(_PAGE_IOREMAP)

extern pgd_t swapper_pg_dir[];

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_LEAF is needed too because:
	 * When splitting a THP, split_huge_page() will temporarily clear
	 * the present bit, in this situation, pmd_present() and
	 * pmd_trans_huge() still needs to return true.
	 */
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE | _PAGE_LEAF));
}
#else
static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}
#endif

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) == 0);
}

static inline int pmd_bad(pmd_t pmd)
{
	return !pmd_present(pmd) || (pmd_val(pmd) & _PAGE_LEAF);
}

#define pmd_leaf	pmd_leaf
static inline int pmd_leaf(pmd_t pmd)
{
	return pmd_present(pmd) && (pmd_val(pmd) & _PAGE_LEAF);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
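
/*
 * pfn_pgd(), pfn_pte() and pte_modify() below run the protection value
 * through ALT_THEAD_PMA() (asm/errata_list.h): on T-Head cores that use
 * vendor-specific page attribute encodings the value is rewritten in place,
 * while on standard hardware the alternative is patched to nops.
 */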

static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
{
	unsigned long prot_val = pgprot_val(prot);

	ALT_THEAD_PMA(prot_val);

	return __pgd((pfn << _PAGE_PFN_SHIFT) | prot_val);
}

static inline unsigned long _pgd_pfn(pgd_t pgd)
{
	return __page_val_to_pfn(pgd_val(pgd));
}

static inline struct page *pmd_page(pmd_t pmd)
{
	return pfn_to_page(__page_val_to_pfn(pmd_val(pmd)));
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)pfn_to_virt(__page_val_to_pfn(pmd_val(pmd)));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

/* Yields the page frame number (PFN) of a page table entry */
static inline unsigned long pte_pfn(pte_t pte)
{
	return __page_val_to_pfn(pte_val(pte));
}

#define pte_page(x)     pfn_to_page(pte_pfn(x))

/* Constructs a page table entry */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	unsigned long prot_val = pgprot_val(prot);

	ALT_THEAD_PMA(prot_val);

	return __pte((pfn << _PAGE_PFN_SHIFT) | prot_val);
}

#define mk_pte(page, prot)       pfn_pte(page_to_pfn(page), prot)

static inline int pte_present(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) == 0);
}

static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}

static inline int pte_exec(pte_t pte)
{
	return pte_val(pte) & _PAGE_EXEC;
}

static inline int pte_user(pte_t pte)
{
	return pte_val(pte) & _PAGE_USER;
}

static inline int pte_huge(pte_t pte)
{
	return pte_present(pte) && (pte_val(pte) & _PAGE_LEAF);
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

/* static inline pte_t pte_rdprotect(pte_t pte) */

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_WRITE));
}

/* static inline pte_t pte_mkread(pte_t pte) */

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_WRITE);
}

/* static inline pte_t pte_mkexec(pte_t pte) */

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)) == _PAGE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

/* Modify page protection bits */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	unsigned long newprot_val = pgprot_val(newprot);

	ALT_THEAD_PMA(newprot_val);

	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | newprot_val);
}

#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))


/* Commit new configuration to MMU hardware */
static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	/*
	 * The kernel assumes that TLBs don't cache invalid entries, but
	 * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
	 * cache flush; it is necessary even after writing invalid entries.
	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
	 * the extra traps reduce performance.  So, eagerly SFENCE.VMA.
	 */
	local_flush_tlb_page(address);
}

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp)
{
	pte_t *ptep = (pte_t *)pmdp;

	update_mmu_cache(vma, address, ptep);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}

/*
 * Certain architectures need to do special things when PTEs within
 * a page table are directly modified.  Thus, the following hook is
 * made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
}

void flush_icache_pte(pte_t pte);

static inline void __set_pte_at(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep, pte_t pteval)
{
	if (pte_present(pteval) && pte_exec(pteval))
		flush_icache_pte(pteval);

	set_pte(ptep, pteval);
}

static inline void set_pte_at(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep, pte_t pteval)
{
	page_table_check_pte_set(mm, addr, ptep, pteval);
	__set_pte_at(mm, addr, ptep, pteval);
}

static inline void pte_clear(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep)
{
	__set_pte_at(mm, addr, ptep, __pte(0));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (!pte_same(*ptep, entry))
		set_pte_at(vma->vm_mm, address, ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pte_t pte = __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));

	page_table_check_pte_clear(mm, address, pte);

	return pte;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pte_t *ptep)
{
	atomic_long_and(~(unsigned long)_PAGE_WRITE, (atomic_long_t *)ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/*
	 * This comment is borrowed from x86, but applies equally to RISC-V:
	 *
	 * Clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot &= ~_PAGE_MTMASK;
	prot |= _PAGE_IO;

	return __pgprot(prot);
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot &= ~_PAGE_MTMASK;
	prot |= _PAGE_NOCACHE;

	return __pgprot(prot);
}

/*
 * THP functions
 */
static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd;
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) & ~(_PAGE_PRESENT|_PAGE_PROT_NONE));
}

#define __pmd_to_phys(pmd)  (__page_val_to_pfn(pmd_val(pmd)) << PAGE_SHIFT)

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT);
}

#define __pud_to_phys(pud)  (__page_val_to_pfn(pud_val(pud)) << PAGE_SHIFT)

static inline unsigned long pud_pfn(pud_t pud)
{
	return ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return pte_write(pmd_pte(pmd));
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pte_dirty(pmd_pte(pmd));
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return pte_young(pmd_pte(pmd));
}

static inline int pmd_user(pmd_t pmd)
{
	return pte_user(pmd_pte(pmd));
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pte_pmd(pte_mkold(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pte_pmd(pte_mkyoung(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pte_pmd(pte_mkwrite(pmd_pte(pmd)));
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pte_pmd(pte_wrprotect(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pte_pmd(pte_mkclean(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pte_pmd(pte_mkdirty(pmd_pte(pmd)));
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
				pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(mm, addr, pmdp, pmd);
	return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd));
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
				pud_t *pudp, pud_t pud)
{
	page_table_check_pud_set(mm, addr, pudp, pud);
	return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud));
}

#ifdef CONFIG_PAGE_TABLE_CHECK
static inline bool pte_user_accessible_page(pte_t pte)
{
	return pte_present(pte) && pte_user(pte);
}

static inline bool pmd_user_accessible_page(pmd_t pmd)
{
	return pmd_leaf(pmd) && pmd_user(pmd);
}

static inline bool pud_user_accessible_page(pud_t pud)
{
	return pud_leaf(pud) && pud_user(pud);
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_leaf(pmd);
}

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(atomic_long_xchg((atomic_long_t *)pmdp, 0));

	page_table_check_pmd_clear(mm, address, pmd);

	return pmd;
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
					unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
				unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd);
	return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd)));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Encode and decode a swap entry
 *
 * Format of swap PTE:
 *	bit            0:	_PAGE_PRESENT (zero)
 *	bits      1 to 3:	_PAGE_LEAF (zero)
 *	bit            5:	_PAGE_PROT_NONE (zero)
 *	bits      6 to 10:	swap type
 *	bits 11 to XLEN-1:	swap offset
 */
#define __SWP_TYPE_SHIFT	6
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define MAX_SWAPFILES_CHECK()	\
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

#define __swp_type(x)	(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)	((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) \
	{ ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
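
/*
 * For example, __swp_entry(3, 0x1000) packs to (3 << 6) | (0x1000 << 11) =
 * 0x8000c0; bit 0 (_PAGE_PRESENT) always stays clear, so a swap PTE can
 * never be mistaken for a valid mapping.
 */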

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(swp) __pmd((swp).val)
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

/*
 * In the RV64 Linux scheme, we give the user half of the virtual-address space
 * and give the kernel the other (upper) half.
 */
#ifdef CONFIG_64BIT
#define KERN_VIRT_START	(-(BIT(VA_BITS)) + TASK_SIZE)
#else
#define KERN_VIRT_START	FIXADDR_START
#endif

/*
 * Task size is:
 * -     0x9fc00000 (~2.5GB) for RV32.
 * -   0x4000000000 ( 256GB) for RV64 using SV39 mmu
 * - 0x800000000000 ( 128TB) for RV64 using SV48 mmu
 *
 * Note that PGDIR_SIZE must evenly divide TASK_SIZE since "RISC-V
 * Instruction Set Manual Volume II: Privileged Architecture" states that
 * "load and store effective addresses, which are 64bits, must have bits
 * 63–48 all equal to bit 47, or else a page-fault exception will occur."
 */
#ifdef CONFIG_64BIT
#define TASK_SIZE_64	(PGDIR_SIZE * PTRS_PER_PGD / 2)
#define TASK_SIZE_MIN	(PGDIR_SIZE_L3 * PTRS_PER_PGD / 2)

#ifdef CONFIG_COMPAT
#define TASK_SIZE_32	(_AC(0x80000000, UL) - PAGE_SIZE)
#define TASK_SIZE	(test_thread_flag(TIF_32BIT) ? \
			 TASK_SIZE_32 : TASK_SIZE_64)
#else
#define TASK_SIZE	TASK_SIZE_64
#endif

#else
#define TASK_SIZE	FIXADDR_START
#define TASK_SIZE_MIN	TASK_SIZE
#endif

#else /* CONFIG_MMU */

#define PAGE_SHARED		__pgprot(0)
#define PAGE_KERNEL		__pgprot(0)
#define swapper_pg_dir		NULL
#define TASK_SIZE		0xffffffffUL
#define VMALLOC_START		0
#define VMALLOC_END		TASK_SIZE

#endif /* !CONFIG_MMU */

#define kern_addr_valid(addr)   (1) /* FIXME */

extern char _start[];
extern void *_dtb_early_va;
extern uintptr_t _dtb_early_pa;
#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_MMU)
#define dtb_early_va	(*(void **)XIP_FIXUP(&_dtb_early_va))
#define dtb_early_pa	(*(uintptr_t *)XIP_FIXUP(&_dtb_early_pa))
#else
#define dtb_early_va	_dtb_early_va
#define dtb_early_pa	_dtb_early_pa
#endif /* CONFIG_XIP_KERNEL */
extern u64 satp_mode;
extern bool pgtable_l4_enabled;

void paging_init(void);
void misc_mem_init(void);

/*
 * ZERO_PAGE is a global shared page that is always zero,
 * used for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_RISCV_PGTABLE_H */