/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <linux/mem_encrypt.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
	 ? (__pgprot(pgprot_val(prot) |					\
		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
	 : (prot))

/*
 * Macros to add or remove encryption attribute
 */
#define pgprot_encrypted(prot)	__pgprot(__sme_set(pgprot_val(prot)))
#define pgprot_decrypted(prot)	__pgprot(__sme_clr(pgprot_val(prot)))

#ifndef __ASSEMBLY__
#include <asm/x86_init.h>

extern pgd_t early_top_pgt[PTRS_PER_PGD];
int __init __early_make_pgtable(unsigned long address, pmdval_t pmd);

void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
void ptdump_walk_pgd_level_checkwx(void);

#ifdef CONFIG_DEBUG_WX
#define debug_checkwx() ptdump_walk_pgd_level_checkwx()
#else
#define debug_checkwx() do { } while (0)
#endif

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__visible;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
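
/*
 * Illustrative sketch (not from the original header): a read fault on an
 * anonymous mapping can be satisfied by wiring the faulting address to
 * the shared zero page instead of allocating memory, roughly:
 *
 *	struct page *zp = ZERO_PAGE(address);
 *	pte_t pte = pte_wrprotect(mk_pte(zp, vma->vm_page_prot));
 *	set_pte_at(vma->vm_mm, address, ptep, pte);
 *
 * Keeping the entry write-protected forces a later write to fault again
 * and allocate a private page.
 */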

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

extern pmdval_t early_pmd_flags;

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_P4D_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_p4d
# define set_p4d(p4dp, p4d)		native_set_p4d(p4dp, p4d)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define p4d_clear(p4d)			native_p4d_clear(p4d)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_P4D_FOLDED
#define p4d_val(x)	native_p4d_val(x)
#define __p4d(x)	native_make_p4d(x)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do {} while(0)

#endif	/* CONFIG_PARAVIRT */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}


static inline u32 read_pkru(void)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		return __read_pkru();
	return 0;
}

static inline void write_pkru(u32 pkru)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		__write_pkru(pkru);
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_DIRTY;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pud_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_DIRTY;
}

static inline int pud_young(pud_t pud)
{
	return pud_flags(pud) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	return (p4d_val(p4d) & p4d_pfn_mask(p4d)) >> PAGE_SHIFT;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}
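
/*
 * Worked example (illustrative, assuming 4 KiB pages, PAGE_SHIFT == 12):
 * a PTE value of 0x0000000123456067 carries the flags 0x067
 * (PRESENT|RW|USER|ACCESSED|DIRTY) in its low bits; pte_pfn() masks the
 * flag bits off and shifts, yielding pfn 0x123456, i.e. physical
 * address 0x123456000.
 */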

static inline int p4d_large(p4d_t p4d)
{
	/* No 512 GiB pages yet */
	return 0;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
{
	return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_trans_huge(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}
#endif

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pmd_devmap(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_devmap(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_DEVMAP);
}
#else
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
#endif

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
}

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DEVMAP);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
404
	return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
J
Johannes Weiner 已提交
405 406
}

static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
{
	pudval_t v = native_pud_val(pud);

	return __pud(v | set);
}

static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
{
	pudval_t v = native_pud_val(pud);

	return __pud(v & ~clear);
}

static inline pud_t pud_mkold(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkclean(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_DIRTY);
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_RW);
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mkdevmap(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DEVMAP);
}

static inline pud_t pud_mkhuge(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_PSE);
}

static inline pud_t pud_mkyoung(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_RW);
}

static inline pud_t pud_mknotpresent(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline int pud_soft_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mksoft_dirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_clear_soft_dirty(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_SOFT_DIRTY);
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}
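
/*
 * For instance (a sketch): on a CPU without NX support,
 * __supported_pte_mask has _PAGE_NX cleared, so a present pgprot
 * carrying _PAGE_NX is silently stripped of it here, while a
 * non-present entry (e.g. a swap entry) keeps all of its
 * software-defined bits untouched.
 */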

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
{
	return __pud(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

	return __pte(val);
}
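
/*
 * Illustrative example: pte_modify(pte, PAGE_KERNEL_RO) keeps everything
 * in _PAGE_CHG_MASK (the pfn plus, among others, the accessed, dirty and
 * soft-dirty state) and replaces the remaining protection bits with
 * those of the new pgprot, so a writable mapping becomes read-only
 * without losing its dirty bit.
 */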

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd);

	val &= _HPAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;

	return __pmd(val);
}

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))
#define p4d_pgprot(x) __pgprot(p4d_flags(x))

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 enum page_cache_mode pcm,
					 enum page_cache_mode new_pcm)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 * - request is write-through, return cannot be write-back
	 * - request is write-through, return cannot be write-combine
	 */
	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WC &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WC)) {
		return 0;
	}

	return 1;
}
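
/*
 * Example (illustrative): a range that was first mapped write-through
 * must not gain a write-back alias, so
 * is_new_memtype_allowed(paddr, size, _PAGE_CACHE_MODE_WT,
 * _PAGE_CACHE_MODE_WB) returns 0 and the caller has to fall back to a
 * compatible cache mode instead of aliasing the mapping.
 */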

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>
#include <asm/fixmap.h>

static inline int pte_none(pte_t pte)
{
	return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pte_devmap(pte_t a)
{
	return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
}
#endif

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_flags(a) & _PAGE_PRESENT)
		return true;

	if ((pte_flags(a) & _PAGE_PROTNONE) &&
			mm_tlb_flush_pending(mm))
		return true;

	return false;
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pmd_none(pmd_t pmd)
{
	/*
	 * Only check low word on 32-bit platforms, since it might be
	 * out of sync with upper half.
	 */
	unsigned long val = native_pmd_val(pmd);
	return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)	pfn_to_page(pmd_pfn(pmd))

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}
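
/*
 * Worked example (assuming the x86-64 defaults PMD_SHIFT == 21 and
 * PTRS_PER_PMD == 512): pmd_index(0xffffffff81a00000) ==
 * (0xffffffff81a00000 >> 21) & 511 == 13.
 */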

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}

#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)	pfn_to_page(pud_pfn(pud))

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

#if CONFIG_PGTABLE_LEVELS > 3
static inline int p4d_none(p4d_t p4d)
{
	return (native_p4d_val(p4d) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_flags(p4d) & _PAGE_PRESENT;
}

static inline unsigned long p4d_page_vaddr(p4d_t p4d)
{
	return (unsigned long)__va(p4d_val(p4d) & p4d_pfn_mask(p4d));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define p4d_page(p4d)	pfn_to_page(p4d_pfn(p4d))

/* Find an entry in the third-level page table.. */
static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address);
}

static inline int p4d_bad(p4d_t p4d)
{
	return (p4d_flags(p4d) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#endif  /* CONFIG_PGTABLE_LEVELS > 3 */

static inline unsigned long p4d_index(unsigned long address)
{
	return (address >> P4D_SHIFT) & (PTRS_PER_P4D - 1);
}

#if CONFIG_PGTABLE_LEVELS > 4
static inline int pgd_present(pgd_t pgd)
{
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)	pfn_to_page(pgd_pfn(pgd))

/* to find an entry in a page-table-directory. */
static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	/*
	 * There is no need to do a workaround for the KNL stray
	 * A/D bit erratum here.  PGDs only point to page tables
	 * except on 32-bit non-PAE which is not supported on
	 * KNL.
	 */
	return !native_pgd_val(pgd);
}
#endif	/* CONFIG_PGTABLE_LEVELS > 4 */

#endif	/* __ASSEMBLY__ */

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
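
/*
 * A minimal sketch (not part of the original header) of a full kernel
 * page-table walk using the helpers above, assuming every level is
 * present and no huge pages are involved:
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	p4d_t *p4d = p4d_offset(pgd, addr);
 *	pud_t *pud = pud_offset(p4d, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * Real callers must check pgd_none()/p4d_none()/pud_none()/pmd_none()
 * (and the *_large() helpers) at each level before descending.
 */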


#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);
extern void memblock_find_dma_reserve(void);

#ifdef CONFIG_X86_64
/* Realmode trampoline initialization. */
extern pgd_t trampoline_pgd_entry;
static inline void __meminit init_trampoline_default(void)
{
	/* Default trampoline pgd value */
	trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
}
# ifdef CONFIG_RANDOMIZE_MEMORY
void __meminit init_trampoline(void);
# else
#  define init_trampoline init_trampoline_default
# endif
#else
static inline void init_trampoline(void) { }
#endif

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp)
{
	pud_t res = *pudp;

	native_pud_clear(pudp);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	native_set_pmd(pmdp, pmd);
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
			      pud_t *pudp, pud_t pud)
{
	native_set_pud(pudp, pud);
}

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);
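
/*
 * Sketch of the intended use (illustrative, not from the original
 * header): the fault path builds the updated entry and lets
 * ptep_set_access_flags() decide whether anything changed, flushing
 * only when it did:
 *
 *	entry = pte_mkyoung(*ptep);
 *	if (ptep_set_access_flags(vma, address, ptep, entry, 0))
 *		update_mmu_cache(vma, address, ptep);
 */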

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
}

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define  __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
extern int pudp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pud_t *pudp,
				 pud_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);
extern int pudp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pud_t *pudp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);


#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pmd_t *pmdp)
{
	return native_pmdp_get_and_clear(pmdp);
}

#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
					unsigned long addr, pud_t *pudp)
{
	return native_pudp_get_and_clear(pudp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return pud_flags(pud) & _PAGE_RW;
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
       memcpy(dst, src, count * sizeof(pgd_t));
}
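
/*
 * Typical use (a sketch): seeding the kernel half of a fresh pgd from
 * the reference page tables, e.g.
 *
 *	clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 */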

#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}
static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}
static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}
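
/*
 * For example (illustrative): PG_LEVEL_2M gives page_level_shift() == 21,
 * page_level_size() == 2 MiB and page_level_mask() == ~0x1fffffUL, so
 * "addr & page_level_mask(PG_LEVEL_2M)" rounds an address down to its
 * 2 MiB frame.
 */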

/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}
static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
		unsigned long addr, pud_t *pud)
{
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}
#endif
#endif

#define PKRU_AD_BIT 0x1
#define PKRU_WD_BIT 0x2
#define PKRU_BITS_PER_PKEY 2

static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	return !(pkru & (PKRU_AD_BIT << pkru_pkey_bits));
}

static inline bool __pkru_allows_write(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	/*
	 * Access-disable disables writes too so we need to check
	 * both bits here.
	 */
	return !(pkru & ((PKRU_AD_BIT|PKRU_WD_BIT) << pkru_pkey_bits));
}
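
/*
 * Worked example (illustrative): each pkey owns two adjacent PKRU bits,
 * so pkey 1 uses bit 2 (AD) and bit 3 (WD).  With pkru == 0x8 only the
 * WD bit of pkey 1 is set: __pkru_allows_read(0x8, 1) is true while
 * __pkru_allows_write(0x8, 1) is false.
 */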

static inline u16 pte_flags_pkey(unsigned long pte_flags)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/* ifdef to avoid doing 59-bit shift on 32-bit values */
	return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
#else
	return 0;
#endif
}

static inline bool __pkru_allows_pkey(u16 pkey, bool write)
{
	u32 pkru = read_pkru();

	if (!__pkru_allows_read(pkru, pkey))
		return false;
	if (write && !__pkru_allows_write(pkru, pkey))
		return false;

	return true;
}

/*
 * 'pteval' can come from a PTE, PMD or PUD.  We only check
 * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
 * same value on all 3 types.
 */
static inline bool __pte_access_permitted(unsigned long pteval, bool write)
{
	unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;

	if (write)
		need_pte_bits |= _PAGE_RW;

	if ((pteval & need_pte_bits) != need_pte_bits)
		return 0;

	return __pkru_allows_pkey(pte_flags_pkey(pteval), write);
}

#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	return __pte_access_permitted(pte_val(pte), write);
}

#define pmd_access_permitted pmd_access_permitted
static inline bool pmd_access_permitted(pmd_t pmd, bool write)
{
	return __pte_access_permitted(pmd_val(pmd), write);
}

#define pud_access_permitted pud_access_permitted
static inline bool pud_access_permitted(pud_t pud, bool write)
{
	return __pte_access_permitted(pud_val(pud), write);
}

#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */