#ifndef _ASM_GENERIC_PGTABLE_H
#define _ASM_GENERIC_PGTABLE_H

#ifndef __ASSEMBLY__
#ifdef CONFIG_MMU

#include <linux/mm_types.h>
#include <linux/bug.h>

/*
 * On almost all architectures and configurations, 0 can be used as the
 * upper ceiling to free_pgtables(): on many architectures it has the same
 * effect as using TASK_SIZE.  However, there is one configuration which
 * must impose a more careful limit, to avoid freeing kernel pgtables.
 */
#ifndef USER_PGTABLES_CEILING
#define USER_PGTABLES_CEILING	0UL
#endif
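
/*
 * Illustrative caller-side sketch (an assumption about the mm-side user,
 * not something defined here): exit-time teardown passes the ceiling
 * straight through, roughly as
 *
 *	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
 *
 * so an architecture that must not free kernel pgtables supplies a
 * non-zero ceiling below its kernel mappings instead of the 0 default.
 */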

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);
#endif

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
#endif

#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	pte_t pte = *ptep;
	int r = 1;
	if (!pte_young(pte))
		r = 0;
	else
		set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
	return r;
}
#endif

#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	int r = 1;
	if (!pmd_young(pmd))
		r = 0;
	else
		set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
	return r;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep);
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address,
				       pte_t *ptep)
{
	pte_t pte = *ptep;
	pte_clear(mm, address, ptep);
	return pte;
}
#endif

#ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address,
				       pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	pmd_clear(pmdp);
	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address, pte_t *ptep,
					    int full)
{
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	return pte;
}
#endif

/*
 * Some architectures may be able to avoid expensive synchronization
 * primitives when modifications are made to PTEs which are already
 * not present, or in the process of an address space destruction.
 */
#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
static inline void pte_clear_not_present_full(struct mm_struct *mm,
					      unsigned long address,
					      pte_t *ptep,
					      int full)
{
	pte_clear(mm, address, ptep);
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
			      unsigned long address,
			      pte_t *ptep);
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
			      unsigned long address,
			      pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte = *ptep;
	set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
}
#endif

#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	pmd_t old_pmd = *pmdp;
	set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	BUG();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable);
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}
#endif

#ifndef __HAVE_ARCH_PTE_UNUSED
/*
 * Some architectures provide facilities to virtualization guests
 * so that they can flag allocated pages as unused. This allows the
 * host to transparently reclaim unused pages. This function returns
 * whether the pte's page is unused.
 */
static inline int pte_unused(pte_t pte)
{
	return 0;
}
#endif

#ifndef __HAVE_ARCH_PMD_SAME
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return pmd_val(pmd_a) == pmd_val(pmd_b);
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr)	pgd_offset(mm, addr)
#endif

#ifndef __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr)	(pte)
#endif

#ifndef pte_accessible
# define pte_accessible(mm, pte)	((void)(pte), 1)
#endif

#ifndef pte_present_nonuma
#define pte_present_nonuma(pte) pte_present(pte)
#endif

#ifndef flush_tlb_fix_spurious_fault
#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
#endif

#ifndef pgprot_noncached
#define pgprot_noncached(prot)	(prot)
#endif

#ifndef pgprot_writecombine
#define pgprot_writecombine pgprot_noncached
#endif

#ifndef pgprot_device
#define pgprot_device pgprot_noncached
#endif

#ifndef pgprot_modify
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot)))
		newprot = pgprot_noncached(newprot);
	if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot)))
		newprot = pgprot_writecombine(newprot);
	if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot)))
		newprot = pgprot_device(newprot);
	return newprot;
}
#endif
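
/*
 * Usage sketch (assuming an mprotect-style caller; "newflags" is the
 * caller's new vm_flags, not anything defined here): rebuilding
 * vma->vm_page_prot goes through pgprot_modify() so that memory-type
 * bits set by pgprot_noncached()/pgprot_writecombine()/pgprot_device()
 * survive the protection change:
 *
 *	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
 *					  vm_get_page_prot(newflags));
 */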

/*
 * When walking page tables, get the address of the next boundary,
 * or the end address of the range if that comes earlier.  Although no
 * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
 */

#define pgd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})

#ifndef pud_addr_end
#define pud_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

#ifndef pmd_addr_end
#define pmd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif
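
/*
 * Worked example of the "- 1" comparison (illustrative reasoning, using
 * the macros above): near the top of the address space,
 * ((addr) + PGDIR_SIZE) & PGDIR_MASK can round up and wrap to 0.  Then
 * __boundary - 1 == ULONG_MAX, the unsigned comparison fails, and the
 * macro returns "end" rather than 0, so "addr != end" walk loops (see
 * the skeleton further below) still terminate.
 */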

/*
 * When walking page tables, we usually want to skip any p?d_none entries;
 * and any p?d_bad entries - reporting the error before resetting to none.
 * Do the tests inline, but report and clear the bad entry in mm/memory.c.
 */
void pgd_clear_bad(pgd_t *);
void pud_clear_bad(pud_t *);
void pmd_clear_bad(pmd_t *);

static inline int pgd_none_or_clear_bad(pgd_t *pgd)
{
	if (pgd_none(*pgd))
		return 1;
	if (unlikely(pgd_bad(*pgd))) {
		pgd_clear_bad(pgd);
		return 1;
	}
	return 0;
}

static inline int pud_none_or_clear_bad(pud_t *pud)
{
	if (pud_none(*pud))
		return 1;
	if (unlikely(pud_bad(*pud))) {
		pud_clear_bad(pud);
		return 1;
	}
	return 0;
}

static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
	if (pmd_none(*pmd))
		return 1;
	if (unlikely(pmd_bad(*pmd))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}
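
/*
 * Illustrative walk skeleton (a simplified sketch; walk_pud_range() is
 * a placeholder for whatever the caller does at the next level, not a
 * function defined here).  This is roughly how the addr_end macros and
 * the p?d_none_or_clear_bad() helpers combine in page table walkers:
 *
 *	pgd = pgd_offset(mm, addr);
 *	do {
 *		next = pgd_addr_end(addr, end);
 *		if (pgd_none_or_clear_bad(pgd))
 *			continue;
 *		walk_pud_range(pgd, addr, next);
 *	} while (pgd++, addr = next, addr != end);
 */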

static inline pte_t __ptep_modify_prot_start(struct mm_struct *mm,
					     unsigned long addr,
					     pte_t *ptep)
{
	/*
	 * Get the current pte state, but zero it out to make it
	 * non-present, preventing the hardware from asynchronously
	 * updating it.
	 */
	return ptep_get_and_clear(mm, addr, ptep);
}

static inline void __ptep_modify_prot_commit(struct mm_struct *mm,
					     unsigned long addr,
					     pte_t *ptep, pte_t pte)
{
	/*
	 * The pte is non-present, so there's no hardware state to
	 * preserve.
	 */
	set_pte_at(mm, addr, ptep, pte);
}

#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
/*
 * Start a pte protection read-modify-write transaction, which
 * protects against asynchronous hardware modifications to the pte.
 * The intention is not to prevent the hardware from making pte
 * updates, but to prevent any updates it may make from being lost.
 *
 * This does not protect against other software modifications of the
 * pte; the appropriate pte lock must be held over the transaction.
 *
 * Note that this interface is intended to be batchable, meaning that
 * ptep_modify_prot_commit may not actually update the pte, but merely
 * queue the update to be done at some later time.  The update must be
 * actually committed before the pte lock is released, however.
 */
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
					   unsigned long addr,
					   pte_t *ptep)
{
	return __ptep_modify_prot_start(mm, addr, ptep);
}

/*
 * Commit an update to a pte, leaving any hardware-controlled bits in
 * the PTE unmodified.
 */
static inline void ptep_modify_prot_commit(struct mm_struct *mm,
					   unsigned long addr,
					   pte_t *ptep, pte_t pte)
{
	__ptep_modify_prot_commit(mm, addr, ptep, pte);
}
#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
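
/*
 * Minimal usage sketch (assuming an mprotect-style caller that already
 * holds the pte lock for *ptep; "newprot" is the caller's target
 * protection):
 *
 *	ptent = ptep_modify_prot_start(mm, addr, ptep);
 *	ptent = pte_modify(ptent, newprot);
 *	ptep_modify_prot_commit(mm, addr, ptep, ptent);
 *
 * Dirty/accessed bits the hardware set before start() travel in ptent,
 * and anything it sets after commit() lands in the new pte, so no
 * hardware update is lost across the transaction.
 */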
#endif /* CONFIG_MMU */

/*
 * A facility to provide lazy MMU batching.  This allows PTE updates and
 * page invalidations to be delayed until a call to leave lazy MMU mode
 * is issued.  Some architectures may benefit from doing this, and it is
 * beneficial for both shadow and direct mode hypervisors, which may batch
 * the PTE updates which happen during this window.  Note that using this
 * interface requires that read hazards be removed from the code.  A read
 * hazard could result in the direct mode hypervisor case, since the actual
 * write to the page tables may not yet have taken place, so reads through
 * a raw PTE pointer after it has been modified are not guaranteed to be
 * up to date.  This mode can only be entered and left under the protection of
 * the page table locks for all page tables which may be modified.  In the UP
 * case, this is required so that preemption is disabled, and in the SMP case,
 * it must synchronize the delayed page table writes properly on other CPUs.
 */
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
#define arch_enter_lazy_mmu_mode()	do {} while (0)
#define arch_leave_lazy_mmu_mode()	do {} while (0)
#define arch_flush_lazy_mmu_mode()	do {} while (0)
#endif
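
/*
 * Usage sketch (assuming the caller holds the page table lock as
 * required above, and that ptep/addr/end describe a contiguous run):
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (; addr != end; ptep++, addr += PAGE_SIZE)
 *		set_pte_at(mm, addr, ptep, pte_mkold(*ptep));
 *	arch_leave_lazy_mmu_mode();
 *
 * A paravirt backend may queue the set_pte_at() calls and flush them
 * when lazy mode is left; the loop is read-hazard free because each
 * pte is read once, before it is rewritten.
 */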

/*
 * A facility to provide batching of the reload of page tables and
 * other process state with the actual context switch code for
 * paravirtualized guests.  By convention, only one of the batched
 * update (lazy) modes (CPU, MMU) should be active at any given time,
 * entry should never be nested, and entry and exits should always be
 * paired.  This is for sanity of maintaining and reasoning about the
 * kernel code.  In this case, the exit (end of the context switch) is
 * in architecture-specific code, and so doesn't need a generic
 * definition.
 */
#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
#define arch_start_context_switch(prev)	do {} while (0)
#endif
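
/*
 * Pairing sketch (based on the scheduler's context_switch(); the exit
 * side is arch-specific as described above):
 *
 *	arch_start_context_switch(prev);
 *	... switch mm and registers ...
 *	the architecture then ends the batched region from its own
 *	switch_to()/paravirt code
 */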

#ifndef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return 0;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return 0;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd;
}

static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte;
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return 0;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte;
}

static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
{
	return pte;
}

static inline pte_t pte_file_mksoft_dirty(pte_t pte)
{
	return pte;
}

static inline int pte_file_soft_dirty(pte_t pte)
{
	return 0;
}
#endif

#ifndef __HAVE_PFNMAP_TRACKING
/*
 * Interfaces that can be used by architecture code to keep track of
 * memory type of pfn mappings specified by remap_pfn_range() and
 * vm_insert_pfn().
 */

/*
 * track_pfn_remap is called when a _new_ pfn mapping is being established
 * by remap_pfn_range() for physical range indicated by pfn and size.
 */
static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
				  unsigned long pfn, unsigned long addr,
				  unsigned long size)
{
	return 0;
}

/*
 * track_pfn_insert is called when a _new_ single pfn is established
 * by vm_insert_pfn().
 */
static inline int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
				   unsigned long pfn)
{
	return 0;
}

/*
 * track_pfn_copy is called when a vma covering the pfnmap gets
 * copied through copy_page_range().
 */
static inline int track_pfn_copy(struct vm_area_struct *vma)
{
	return 0;
}

/*
 * untrack_pfn is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case pfn, size are zero).
 */
static inline void untrack_pfn(struct vm_area_struct *vma,
			       unsigned long pfn, unsigned long size)
{
}
#else
extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
			   unsigned long pfn, unsigned long addr,
			   unsigned long size);
extern int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
			    unsigned long pfn);
extern int track_pfn_copy(struct vm_area_struct *vma);
extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
			unsigned long size);
#endif
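
/*
 * Rough sketch of the expected call pattern (simplified; the real
 * callers live in core mm code):
 *
 *	remap_pfn_range():
 *		err = track_pfn_remap(vma, &prot, pfn, addr, size);
 *		... install ptes with the possibly rewritten prot ...
 *		untrack_pfn(vma, pfn, size) only on failure
 *
 *	vm_insert_pfn():                track_pfn_insert(vma, &pgprot, pfn)
 *	copy_page_range() of a pfnmap:  track_pfn_copy(vma)
 *	unmapping the whole vma:        untrack_pfn(vma, 0, 0)
 */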

#ifdef __HAVE_COLOR_ZERO_PAGE
static inline int is_zero_pfn(unsigned long pfn)
{
	extern unsigned long zero_pfn;
	unsigned long offset_from_zero_pfn = pfn - zero_pfn;
	return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
}

#define my_zero_pfn(addr)	page_to_pfn(ZERO_PAGE(addr))

#else
static inline int is_zero_pfn(unsigned long pfn)
{
	extern unsigned long zero_pfn;
	return pfn == zero_pfn;
}

static inline unsigned long my_zero_pfn(unsigned long addr)
{
	extern unsigned long zero_pfn;
	return zero_pfn;
}
#endif

#ifdef CONFIG_MMU

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return 0;
}
static inline int pmd_trans_splitting(pmd_t pmd)
{
	return 0;
}
#ifndef __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	BUG();
	return 0;
}
#endif /* __HAVE_ARCH_PMD_WRITE */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifndef pmd_read_atomic
static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
{
	/*
	 * Depend on the compiler for an atomic pmd read. NOTE: this is
	 * only going to work if pmdval_t isn't larger than
	 * an unsigned long.
	 */
	return *pmdp;
}
#endif

#ifndef pmd_move_must_withdraw
static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
					 spinlock_t *old_pmd_ptl)
{
	/*
	 * With split pmd lock we also need to move preallocated
	 * PTE page table if new_pmd is on different PMD page table.
	 */
	return new_pmd_ptl != old_pmd_ptl;
}
#endif

/*
 * This function is meant to be used by sites walking pagetables with
 * the mmap_sem held in read mode to protect against MADV_DONTNEED and
 * transhuge page faults. MADV_DONTNEED can convert a transhuge pmd
 * into a null pmd and the transhuge page fault can convert a null pmd
 * into a hugepmd or into a regular pmd (if the hugepage allocation
 * fails). While holding the mmap_sem in read mode the pmd becomes
 * stable and stops changing under us only if it's not null and not a
 * transhuge pmd. When those races occur and this function makes a
 * difference vs the standard pmd_none_or_clear_bad, the result is
 * undefined, so behaving as if the pmd were none is safe (because it
 * can return none anyway). The compiler-level barrier() is critically
 * important to compute the two checks atomically on the same pmdval.
 *
 * For 32bit kernels with a 64bit large pmd_t this automatically takes
 * care of reading the pmd atomically to avoid SMP race conditions
 * against pmd_populate() when the mmap_sem is held for reading by the
 * caller (a special atomic read not done by "gcc" as in the generic
 * version above is also needed when THP is disabled because the page
 * fault can populate the pmd from under us).
 */
static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
	pmd_t pmdval = pmd_read_atomic(pmd);
	/*
	 * The barrier will stabilize the pmdval in a register or on
	 * the stack so that it will stop changing under the code.
	 *
	 * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE,
	 * pmd_read_atomic is allowed to return a not atomic pmdval
	 * (for example pointing to an hugepage that has never been
	 * mapped in the pmd). The below checks will only care about
	 * the low part of the pmd with 32bit PAE x86 anyway, with the
	 * exception of pmd_none(). So the important thing is that if
	 * the low part of the pmd is found null, the high part will
	 * be also null or the pmd_none() check below would be
	 * confused.
	 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	barrier();
#endif
	if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
		return 1;
	if (unlikely(pmd_bad(pmdval))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}

/*
 * This is a noop if Transparent Hugepage Support is not built into
 * the kernel. Otherwise it is equivalent to
 * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in
 * places that have already verified the pmd is not none and want to
 * walk ptes while holding the mmap_sem in read mode (write mode doesn't
 * need this). If THP is not enabled, the pmd can't go away under the
 * code even if MADV_DONTNEED runs, but if THP is enabled we need to
 * run pmd_trans_unstable before walking the ptes after
 * split_huge_page_pmd returns (because it may have run while the pmd
 * was null, but then a page fault can map in a THP and not a
 * regular page).
 */
static inline int pmd_trans_unstable(pmd_t *pmd)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	return pmd_none_or_trans_huge_or_clear_bad(pmd);
#else
	return 0;
#endif
}
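
/*
 * Typical caller pattern (an illustrative sketch): a walker holding
 * mmap_sem for read checks the pmd once, treats an unstable result as
 * if it were none, and only then maps the pte page:
 *
 *	if (pmd_trans_unstable(pmd))
 *		return 0;
 *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *	... walk the ptes ...
 *	pte_unmap_unlock(pte, ptl);
 */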

#ifdef CONFIG_NUMA_BALANCING
/*
 * _PAGE_NUMA distinguishes between an unmapped page table entry, an entry that
 * is protected for PROT_NONE and a NUMA hinting fault entry. If the
 * architecture defines __PAGE_PROTNONE then it should take that into account
 * but those that do not can rely on the fact that the NUMA hinting scanner
 * skips inaccessible VMAs.
 *
 * pte/pmd_present() returns true if pte/pmd_numa returns true. Page
 * fault triggers on those regions if pte/pmd_numa returns true
 * (because _PAGE_PRESENT is not set).
 */
#ifndef pte_numa
static inline int pte_numa(pte_t pte)
{
	return ptenuma_flags(pte) == _PAGE_NUMA;
}
#endif

#ifndef pmd_numa
static inline int pmd_numa(pmd_t pmd)
{
	return pmdnuma_flags(pmd) == _PAGE_NUMA;
}
#endif

/*
 * pte/pmd_mknuma sets the _PAGE_ACCESSED bitflag automatically
 * because they're called by the NUMA hinting minor page fault. If we
 * wouldn't set the _PAGE_ACCESSED bitflag here, the TLB miss handler
 * would be forced to set it later while filling the TLB after we
 * return to userland. That would trigger a second write to memory
 * that we optimize away by setting _PAGE_ACCESSED here.
 */
#ifndef pte_mknonnuma
static inline pte_t pte_mknonnuma(pte_t pte)
{
	pteval_t val = pte_val(pte);

	val &= ~_PAGE_NUMA;
	val |= (_PAGE_PRESENT|_PAGE_ACCESSED);
	return __pte(val);
}
#endif

#ifndef pmd_mknonnuma
static inline pmd_t pmd_mknonnuma(pmd_t pmd)
{
	pmdval_t val = pmd_val(pmd);

	val &= ~_PAGE_NUMA;
	val |= (_PAGE_PRESENT|_PAGE_ACCESSED);

	return __pmd(val);
}
#endif

#ifndef pte_mknuma
static inline pte_t pte_mknuma(pte_t pte)
{
	pteval_t val = pte_val(pte);

	VM_BUG_ON(!(val & _PAGE_PRESENT));

	val &= ~_PAGE_PRESENT;
	val |= _PAGE_NUMA;

	return __pte(val);
}
#endif

#ifndef ptep_set_numa
static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep)
{
	pte_t ptent = *ptep;

	ptent = pte_mknuma(ptent);
	set_pte_at(mm, addr, ptep, ptent);
	return;
}
#endif

#ifndef pmd_mknuma
static inline pmd_t pmd_mknuma(pmd_t pmd)
{
	pmdval_t val = pmd_val(pmd);

	val &= ~_PAGE_PRESENT;
	val |= _PAGE_NUMA;

	return __pmd(val);
}
#endif

#ifndef pmdp_set_numa
static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
				 pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmd_mknuma(pmd);
	set_pmd_at(mm, addr, pmdp, pmd);
	return;
}
#endif
#else
static inline int pmd_numa(pmd_t pmd)
{
	return 0;
}

static inline int pte_numa(pte_t pte)
{
	return 0;
}

static inline pte_t pte_mknonnuma(pte_t pte)
{
	return pte;
}

static inline pmd_t pmd_mknonnuma(pmd_t pmd)
{
	return pmd;
}

static inline pte_t pte_mknuma(pte_t pte)
{
	return pte;
}

static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep)
{
	return;
}


static inline pmd_t pmd_mknuma(pmd_t pmd)
{
	return pmd;
}

static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
				 pmd_t *pmdp)
{
	return;
}
#endif /* CONFIG_NUMA_BALANCING */

#endif /* CONFIG_MMU */

#endif /* !__ASSEMBLY__ */

#ifndef io_remap_pfn_range
#define io_remap_pfn_range remap_pfn_range
#endif

#endif /* _ASM_GENERIC_PGTABLE_H */