#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <asm/page.h>
#include <asm/e820.h>

#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
	 ? (__pgprot(pgprot_val(prot) |					\
		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
	 : (prot))
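
/*
 * Illustrative use (a common driver-style pattern, not part of this
 * header): a device mmap() handler typically marks its MMIO mapping
 * uncached before remapping it into userspace:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	io_remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
 */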

#ifndef __ASSEMBLY__
#include <asm/x86_init.h>

void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
void ptdump_walk_pgd_level_checkwx(void);

#ifdef CONFIG_DEBUG_WX
#define debug_checkwx() ptdump_walk_pgd_level_checkwx()
#else
#define debug_checkwx() do { } while (0)
#endif

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__visible;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
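
/*
 * For illustration: ZERO_PAGE() ignores its argument on x86 and always
 * resolves to the same global page of zeroes, so e.g. a read fault on an
 * untouched anonymous mapping can be serviced by mapping ZERO_PAGE(address)
 * read-only instead of allocating a fresh page.
 */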

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)
#define set_pmd_at(mm, addr, pmdp, pmd)	native_set_pmd_at(mm, addr, pmdp, pmd)
#define set_pud_at(mm, addr, pudp, pud)	native_set_pud_at(mm, addr, pudp, pud)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)              do { } while (0)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do {} while(0)

#endif	/* CONFIG_PARAVIRT */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}
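
/*
 * Example of the rule above (sketch only): callers are expected to check
 * pte_present() before consulting these accessors, e.g.
 *
 *	if (pte_present(pte) && pte_dirty(pte))
 *		set_page_dirty(pte_page(pte));
 */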

static inline u32 read_pkru(void)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		return __read_pkru();
	return 0;
}

static inline void write_pkru(u32 pkru)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		__write_pkru(pkru);
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_DIRTY;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pud_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_DIRTY;
}

static inline int pud_young(pud_t pud)
{
	return pud_flags(pud) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
{
	return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}
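
/*
 * Note (for illustration): a transparent huge page is marked by _PAGE_PSE
 * in the pmd.  Device/DAX huge mappings also set _PAGE_PSE but carry
 * _PAGE_DEVMAP as well, which is why the check above requires that only
 * _PAGE_PSE is set out of the two bits.
 */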

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_trans_huge(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}
#endif

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pmd_devmap(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_devmap(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_DEVMAP);
}
#else
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
#endif
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
}

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DEVMAP);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
}

static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
{
	pudval_t v = native_pud_val(pud);

	return __pud(v | set);
}

static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
{
	pudval_t v = native_pud_val(pud);

	return __pud(v & ~clear);
}

static inline pud_t pud_mkold(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkclean(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_DIRTY);
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_RW);
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mkdevmap(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DEVMAP);
}

static inline pud_t pud_mkhuge(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_PSE);
}

static inline pud_t pud_mkyoung(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_RW);
}

static inline pud_t pud_mknotpresent(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline int pud_soft_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mksoft_dirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_clear_soft_dirty(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_SOFT_DIRTY);
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}
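
/*
 * Example (illustrative): on a CPU without NX support, __supported_pte_mask
 * has _PAGE_NX cleared, so a present mapping requested with a protection
 * that includes _PAGE_NX (such as PAGE_KERNEL) silently loses that bit
 * here, while a non-present entry such as a swap entry keeps all of its
 * bits for its own use.
 */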

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
{
	return __pud(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Keep the bits covered by _PAGE_CHG_MASK (pfn, accessed, dirty,
	 * cache attributes) and take everything else, including the NX
	 * bit, from the new protection:
	 */
	val &= _PAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

	return __pte(val);
}
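
/*
 * Worked example (sketch): an mprotect() from PROT_READ to
 * PROT_READ|PROT_WRITE reaches pte_modify() via change_protection(); the
 * pfn and the bits covered by _PAGE_CHG_MASK (such as accessed and dirty)
 * survive unchanged, while bits like RW and NX are taken from the new
 * protection.
 */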

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd);

	val &= _HPAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;

	return __pmd(val);
}

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 enum page_cache_mode pcm,
					 enum page_cache_mode new_pcm)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 * - request is write-through, return cannot be write-back
	 * - request is write-through, return cannot be write-combine
	 */
	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WC &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WC)) {
		return 0;
	}

	return 1;
}
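
/*
 * Example (illustrative): a request for _PAGE_CACHE_MODE_WC over a range
 * that the PAT tracking reports as _PAGE_CACHE_MODE_WB is rejected (0),
 * whereas requesting WB over a range tracked as WC is not in the list
 * above, so the caller may proceed, typically with the returned type.
 */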

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>

static inline int pte_none(pte_t pte)
{
	return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
}
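
/*
 * Note (for illustration): _PAGE_KNL_ERRATUM_MASK covers the Accessed and
 * Dirty bits, which a Xeon Phi (KNL) erratum can set spuriously even in
 * entries that were cleared, so the "none" checks in this file ignore them.
 */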

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pte_devmap(pte_t a)
{
	return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
}
#endif

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_flags(a) & _PAGE_PRESENT)
		return true;

	if ((pte_flags(a) & _PAGE_PROTNONE) &&
			mm_tlb_flush_pending(mm))
		return true;

	return false;
}

static inline int pte_hidden(pte_t pte)
{
	return pte_flags(pte) & _PAGE_HIDDEN;
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	unsigned long val = native_pmd_val(pmd);
	return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)		\
	pfn_to_page((pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}

#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)		\
	pfn_to_page((pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT)

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)		pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)

/* to find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	/*
	 * There is no need to do a workaround for the KNL stray
	 * A/D bit erratum here.  PGDs only point to page tables
	 * except on 32-bit non-PAE which is not supported on
	 * KNL.
	 */
	return !native_pgd_val(pgd);
}
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */

#endif	/* __ASSEMBLY__ */

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
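
/*
 * Worked example (assuming 4-level paging on x86-64: PGDIR_SHIFT=39,
 * PUD_SHIFT=30, PMD_SHIFT=21, PAGE_SHIFT=12, 512 entries per level):
 * a virtual address decomposes into bits [47:39] for the pgd index,
 * [38:30] for the pud, [29:21] for the pmd, [20:12] for the pte and
 * [11:0] for the offset within the page.  A manual walk of a kernel
 * address would look like (sketch only, no _none/_bad checks or locking):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */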


#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);

#ifdef CONFIG_X86_64
/* Realmode trampoline initialization. */
extern pgd_t trampoline_pgd_entry;
static inline void __meminit init_trampoline_default(void)
{
	/* Default trampoline pgd value */
	trampoline_pgd_entry = init_level4_pgt[pgd_index(__PAGE_OFFSET)];
}
# ifdef CONFIG_RANDOMIZE_MEMORY
void __meminit init_trampoline(void);
# else
#  define init_trampoline init_trampoline_default
# endif
#else
static inline void init_trampoline(void) { }
#endif

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp)
{
	pud_t res = *pudp;

	native_pud_clear(pudp);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep , pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
				     pmd_t *pmdp , pmd_t pmd)
{
	native_set_pmd(pmdp, pmd);
}

static inline void native_set_pud_at(struct mm_struct *mm, unsigned long addr,
				     pud_t *pudp, pud_t pud)
{
	native_set_pud(pudp, pud);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces.  It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#endif

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define  __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
extern int pudp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pud_t *pudp,
				 pud_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);
extern int pudp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pud_t *pudp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);


#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pmd_t *pmdp)
{
	return native_pmdp_get_and_clear(pmdp);
}

#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
					unsigned long addr, pud_t *pudp)
{
	return native_pudp_get_and_clear(pudp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
       memcpy(dst, src, count * sizeof(pgd_t));
}

#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}
static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}
static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}
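
/*
 * Example (assuming the enum pg_level values from pgtable_types.h, where
 * PG_LEVEL_4K is 1, PG_LEVEL_2M is 2 and PG_LEVEL_1G is 3):
 * page_level_shift() then returns 12, 21 and 30 respectively, so
 * page_level_size(PG_LEVEL_2M) is 2 MiB, and ANDing an address with
 * page_level_mask(PG_LEVEL_2M) rounds it down to a 2 MiB boundary.
 */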

/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}
static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
		unsigned long addr, pud_t *pud)
{
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}
#endif

#define PKRU_AD_BIT 0x1
#define PKRU_WD_BIT 0x2
#define PKRU_BITS_PER_PKEY 2

static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	return !(pkru & (PKRU_AD_BIT << pkru_pkey_bits));
}

static inline bool __pkru_allows_write(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	/*
	 * Access-disable disables writes too so we need to check
	 * both bits here.
	 */
	return !(pkru & ((PKRU_AD_BIT|PKRU_WD_BIT) << pkru_pkey_bits));
}
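
/*
 * Worked example (illustrative): for pkey 1 the two PKRU bits start at bit
 * 2 (1 * PKRU_BITS_PER_PKEY).  A PKRU value of 0x4 sets the access-disable
 * bit for that key, so __pkru_allows_read(0x4, 1) is false; a value of 0x8
 * sets only write-disable, so reads stay allowed but
 * __pkru_allows_write(0x8, 1) is false.
 */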

static inline u16 pte_flags_pkey(unsigned long pte_flags)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/* ifdef to avoid doing 59-bit shift on 32-bit values */
	return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
#else
	return 0;
#endif
}

#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */