#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <linux/mem_encrypt.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
	 ? (__pgprot(pgprot_val(prot) |					\
		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
	 : (prot))
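/*
 * The family check above matters because the PWT/PCD cache-attribute
 * bits only appeared with the 486; on a family-3 CPU the protection
 * value is simply returned unmodified.
 */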

/*
 * Macros to add or remove encryption attribute
 */
#define pgprot_encrypted(prot)	__pgprot(__sme_set(pgprot_val(prot)))
#define pgprot_decrypted(prot)	__pgprot(__sme_clr(pgprot_val(prot)))

#ifndef __ASSEMBLY__
#include <asm/x86_init.h>

extern pgd_t early_top_pgt[PTRS_PER_PGD];
int __init __early_make_pgtable(unsigned long address, pmdval_t pmd);

void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
void ptdump_walk_pgd_level_checkwx(void);

#ifdef CONFIG_DEBUG_WX
#define debug_checkwx() ptdump_walk_pgd_level_checkwx()
#else
#define debug_checkwx() do { } while (0)
#endif

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__visible;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

extern pmdval_t early_pmd_flags;

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_P4D_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_p4d
# define set_p4d(p4dp, p4d)		native_set_p4d(p4dp, p4d)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define p4d_clear(p4d)			native_p4d_clear(p4d)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_P4D_FOLDED
#define p4d_val(x)	native_p4d_val(x)
#define __p4d(x)	native_make_p4d(x)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do { } while (0)

#endif	/* CONFIG_PARAVIRT */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}


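/*
 * RDPKRU and WRPKRU raise #UD when CR4.PKE is not set, hence the
 * X86_FEATURE_OSPKE checks below before touching the register.
 */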
static inline u32 read_pkru(void)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		return __read_pkru();
	return 0;
}

static inline void write_pkru(u32 pkru)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		__write_pkru(pkru);
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_DIRTY;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pud_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_DIRTY;
}

static inline int pud_young(pud_t pud)
{
	return pud_flags(pud) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	return (p4d_val(p4d) & p4d_pfn_mask(p4d)) >> PAGE_SHIFT;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline int p4d_large(p4d_t p4d)
{
	/* No 512 GiB pages yet */
	return 0;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
{
	return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}

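/*
 * Note that these tests require _PAGE_PSE to be set with _PAGE_DEVMAP
 * clear: huge device mappings are deliberately not reported as
 * transparent hugepages.
 */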
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_trans_huge(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}
#endif

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pmd_devmap(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_devmap(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_DEVMAP);
}
#else
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
#endif

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
}

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DEVMAP);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
}

static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
{
	pudval_t v = native_pud_val(pud);

	return __pud(v | set);
}

static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
{
	pudval_t v = native_pud_val(pud);

	return __pud(v & ~clear);
}

static inline pud_t pud_mkold(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkclean(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_DIRTY);
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_RW);
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mkdevmap(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DEVMAP);
}

static inline pud_t pud_mkhuge(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_PSE);
}

static inline pud_t pud_mkyoung(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_RW);
}

static inline pud_t pud_mknotpresent(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline int pud_soft_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mksoft_dirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_clear_soft_dirty(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_SOFT_DIRTY);
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}

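/*
 * As a worked example (illustrative pfn value): pfn_pte(0x1b2,
 * PAGE_KERNEL) below builds a PTE whose physical-address field is
 * 0x1b2000 (the pfn shifted up by PAGE_SHIFT) and whose flag bits are
 * the PAGE_KERNEL flags filtered through __supported_pte_mask.
 */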
static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
{
	return __pud(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

	return __pte(val);
}

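/*
 * _PAGE_CHG_MASK (and _HPAGE_CHG_MASK below) covers the PFN plus the
 * state that a protection change must preserve, e.g. the accessed and
 * dirty bits, so pte_modify()/pmd_modify() swap only the protection
 * bits proper.
 */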
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd);

	val &= _HPAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;

	return __pmd(val);
}

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))
#define p4d_pgprot(x) __pgprot(p4d_flags(x))

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 enum page_cache_mode pcm,
					 enum page_cache_mode new_pcm)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 * - request is write-through, return cannot be write-back
	 * - request is write-through, return cannot be write-combine
	 */
	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WC &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WC)) {
		return 0;
	}

	return 1;
}

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>
#include <asm/fixmap.h>

static inline int pte_none(pte_t pte)
{
	return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
}

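/*
 * The ~_PAGE_KNL_ERRATUM_MASK above hides any stray A/D bits that the
 * Knights Landing erratum may leave behind in an otherwise cleared
 * PTE; see the comment in pgd_none() further down.
 */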
#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pte_devmap(pte_t a)
{
	return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
}
#endif

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_flags(a) & _PAGE_PRESENT)
		return true;

	if ((pte_flags(a) & _PAGE_PROTNONE) &&
			mm_tlb_flush_pending(mm))
		return true;

	return false;
}

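/*
 * A PROT_NONE PTE with a TLB flush still pending counts as accessible
 * above because remote CPUs may still hold the old, present copy of
 * the entry in their TLBs until the flush completes.
 */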
static inline int pte_hidden(pte_t pte)
{
	return pte_flags(pte) & _PAGE_HIDDEN;
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	unsigned long val = native_pmd_val(pmd);
	return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)	pfn_to_page(pmd_pfn(pmd))

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}

#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)	pfn_to_page(pud_pfn(pud))

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

#if CONFIG_PGTABLE_LEVELS > 3
static inline int p4d_none(p4d_t p4d)
{
	return (native_p4d_val(p4d) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_flags(p4d) & _PAGE_PRESENT;
}

static inline unsigned long p4d_page_vaddr(p4d_t p4d)
{
	return (unsigned long)__va(p4d_val(p4d) & p4d_pfn_mask(p4d));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define p4d_page(p4d)	pfn_to_page(p4d_pfn(p4d))

/* Find an entry in the third-level page table.. */
static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address);
}

static inline int p4d_bad(p4d_t p4d)
{
	return (p4d_flags(p4d) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#endif  /* CONFIG_PGTABLE_LEVELS > 3 */

static inline unsigned long p4d_index(unsigned long address)
{
	return (address >> P4D_SHIFT) & (PTRS_PER_P4D - 1);
}

#if CONFIG_PGTABLE_LEVELS > 4
static inline int pgd_present(pgd_t pgd)
{
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)	pfn_to_page(pgd_pfn(pgd))

/* to find an entry in a page-table-directory. */
static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	/*
	 * There is no need to do a workaround for the KNL stray
	 * A/D bit erratum here.  PGDs only point to page tables
	 * except on 32-bit non-PAE which is not supported on
	 * KNL.
	 */
	return !native_pgd_val(pgd);
}
#endif	/* CONFIG_PGTABLE_LEVELS > 4 */

#endif	/* __ASSEMBLY__ */

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
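/*
 * For example, with 4-level paging (PGDIR_SHIFT == 39, PTRS_PER_PGD ==
 * 512), pgd_index(0xffff888000000000) is (0xffff888000000000 >> 39) &
 * 511 == 273.  A full software walk built from the helpers above then
 * looks like:
 *
 *	pgd = pgd_offset(mm, addr);
 *	p4d = p4d_offset(pgd, addr);
 *	pud = pud_offset(p4d, addr);
 *	pmd = pmd_offset(pud, addr);
 *	pte = pte_offset_kernel(pmd, addr);
 */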


#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);
extern void memblock_find_dma_reserve(void);

#ifdef CONFIG_X86_64
/* Realmode trampoline initialization. */
extern pgd_t trampoline_pgd_entry;
static inline void __meminit init_trampoline_default(void)
{
	/* Default trampoline pgd value */
	trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
}
# ifdef CONFIG_RANDOMIZE_MEMORY
void __meminit init_trampoline(void);
# else
#  define init_trampoline init_trampoline_default
# endif
#else
static inline void init_trampoline(void) { }
#endif

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp)
{
	pud_t res = *pudp;

	native_pud_clear(pudp);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	native_set_pmd(pmdp, pmd);
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
			      pud_t *pudp, pud_t pud)
{
	native_set_pud(pudp, pud);
}

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
}

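/*
 * Using an atomic clear_bit() on just the RW bit (above, and in
 * pmdp_set_wrprotect() below) means concurrent Accessed/Dirty updates
 * by the hardware cannot be lost.
 */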
#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define  __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
extern int pudp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pud_t *pudp,
				 pud_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);
extern int pudp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pud_t *pudp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);


#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pmd_t *pmdp)
{
	return native_pmdp_get_and_clear(pmdp);
}

#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
					unsigned long addr, pud_t *pudp)
{
	return native_pudp_get_and_clear(pudp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}

#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}
static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}
static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}
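/*
 * With 4 KiB pages and 512-entry tables PTE_SHIFT is 9, so
 * page_level_shift() maps PG_LEVEL_4K/2M/1G to 12/21/30 and
 * page_level_size() to 4 KiB/2 MiB/1 GiB respectively.
 */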

/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}
static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
		unsigned long addr, pud_t *pud)
{
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}
#endif
#endif

#define PKRU_AD_BIT 0x1
#define PKRU_WD_BIT 0x2
#define PKRU_BITS_PER_PKEY 2

static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	return !(pkru & (PKRU_AD_BIT << pkru_pkey_bits));
}

static inline bool __pkru_allows_write(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	/*
	 * Access-disable disables writes too so we need to check
	 * both bits here.
	 */
	return !(pkru & ((PKRU_AD_BIT|PKRU_WD_BIT) << pkru_pkey_bits));
}
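/*
 * For example, pkey 1 occupies PKRU bits 2-3, so a PKRU value of 0x4
 * (AD set for pkey 1) denies both reads and writes through pkey-1
 * mappings, while 0x8 (WD only) denies just writes.
 */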

static inline u16 pte_flags_pkey(unsigned long pte_flags)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/* ifdef to avoid doing 59-bit shift on 32-bit values */
	return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
#else
	return 0;
#endif
}

static inline bool __pkru_allows_pkey(u16 pkey, bool write)
{
	u32 pkru = read_pkru();

	if (!__pkru_allows_read(pkru, pkey))
		return false;
	if (write && !__pkru_allows_write(pkru, pkey))
		return false;

	return true;
}

/*
 * 'pteval' can come from a PTE, PMD or PUD.  We only check
 * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
 * same value on all 3 types.
 */
static inline bool __pte_access_permitted(unsigned long pteval, bool write)
{
	unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;

	if (write)
		need_pte_bits |= _PAGE_RW;

	if ((pteval & need_pte_bits) != need_pte_bits)
		return 0;

	return __pkru_allows_pkey(pte_flags_pkey(pteval), write);
}

#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	return __pte_access_permitted(pte_val(pte), write);
}

#define pmd_access_permitted pmd_access_permitted
static inline bool pmd_access_permitted(pmd_t pmd, bool write)
{
	return __pte_access_permitted(pmd_val(pmd), write);
}

#define pud_access_permitted pud_access_permitted
static inline bool pud_access_permitted(pud_t pud, bool write)
{
	return __pte_access_permitted(pud_val(pud), write);
}
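/*
 * Note that a protection-key denial fails these checks even when
 * _PAGE_PRESENT, _PAGE_USER and (for a write) _PAGE_RW are all set.
 */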

#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */