/* SPDX-License-Identifier: GPL-2.0-or-later */
/* include/asm-generic/tlb.h
 *
 *	Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/hugetlb_inline.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/*
 * Blindly accessing user memory from NMI context can be dangerous
 * if we're in the middle of switching the current user task or switching
 * the loaded mm.
 */
#ifndef nmi_uaccess_okay
# define nmi_uaccess_okay() true
#endif

#ifdef CONFIG_MMU

/*
 * Generic MMU-gather implementation.
 *
 * The mmu_gather data structure is used by the mm code to implement the
 * correct and efficient ordering of freeing pages and TLB invalidations.
 *
 * This correct ordering is:
 *
 *  1) unhook page
 *  2) TLB invalidate page
 *  3) free page
 *
 * That is, we must never free a page before we have ensured there are no live
 * translations left to it. Otherwise it might be possible to observe (or
 * worse, change) the page content after it has been reused.
 *
 * The mmu_gather API consists of:
 *
 *  - tlb_gather_mmu() / tlb_gather_mmu_fullmm() / tlb_finish_mmu()
 *
 *    start and finish a mmu_gather
 *
 *    Finish in particular will issue a (final) TLB invalidate and free
 *    all (remaining) queued pages.
 *
 *  - tlb_start_vma() / tlb_end_vma(); marks the start / end of a VMA
 *
 *    Defaults to flushing at tlb_end_vma() to reset the range; helps when
 *    there are large holes between the VMAs.
 *
 *  - tlb_remove_table()
 *
 *    tlb_remove_table() is the basic primitive to free page-table directories
 *    (__p*_free_tlb()).  In its most primitive form it is an alias for
 *    tlb_remove_page() below, for when page directories are pages and have no
 *    additional constraints.
 *
 *    See also MMU_GATHER_TABLE_FREE and MMU_GATHER_RCU_TABLE_FREE.
 *
 *  - tlb_remove_page() / __tlb_remove_page()
 *  - tlb_remove_page_size() / __tlb_remove_page_size()
 *
 *    __tlb_remove_page_size() is the basic primitive that queues a page for
 *    freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a
 *    boolean indicating if the queue is (now) full and a call to
 *    tlb_flush_mmu() is required.
 *
 *    tlb_remove_page() and tlb_remove_page_size() imply the call to
 *    tlb_flush_mmu() when required and have no return value.
 *
 *  - tlb_change_page_size()
 *
 *    call before __tlb_remove_page*() to set the current page-size; implies a
 *    possible tlb_flush_mmu() call.
 *
 *  - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
 *
 *    tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
 *                              related state, like the range)
 *
 *    tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
 *			whatever pages are still batched.
 *
 *  - mmu_gather::fullmm
 *
 *    A flag set by tlb_gather_mmu_fullmm() to indicate we're going to free
 *    the entire mm; this allows a number of optimizations.
 *
 *    - We can ignore tlb_{start,end}_vma(); because we don't
 *      care about ranges. Everything will be shot down.
 *
 *    - (RISC) architectures that use ASIDs can cycle to a new ASID
 *      and delay the invalidation until ASID space runs out.
 *
 *  - mmu_gather::need_flush_all
 *
 *    A flag that can be set by the arch code if it wants to force
 *    flush the entire TLB irrespective of the range. For instance
 *    x86-PAE needs this when changing top-level entries.
 *
 * And allows the architecture to provide and implement tlb_flush():
 *
 * tlb_flush() may, in addition to the above mentioned mmu_gather fields, make
 * use of:
 *
 *  - mmu_gather::start / mmu_gather::end
 *
 *    which provides the range that needs to be flushed to cover the pages to
 *    be freed.
 *
 *  - mmu_gather::freed_tables
 *
 *    set when we freed page table pages
 *
 *  - tlb_get_unmap_shift() / tlb_get_unmap_size()
 *
 *    returns the smallest TLB entry size unmapped in this range.
 *
 * If an architecture does not provide tlb_flush() a default implementation
 * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is
 * specified, in which case we'll default to flush_tlb_mm().
 *
 * Additionally there are a few opt-in features:
 *
 *  MMU_GATHER_PAGE_SIZE
 *
 *  This ensures we call tlb_flush() every time tlb_change_page_size() actually
 *  changes the size and provides mmu_gather::page_size to tlb_flush().
 *
 *  This might be useful if your architecture has size specific TLB
 *  invalidation instructions.
 *
 *  MMU_GATHER_TABLE_FREE
 *
 *  This provides tlb_remove_table(), to be used instead of tlb_remove_page()
 *  for page directories (__p*_free_tlb()).
 *
 *  Useful if your architecture has non-page page directories.
 *
 *  When used, an architecture is expected to provide __tlb_remove_table()
 *  which does the actual freeing of these pages.
 *
 *  MMU_GATHER_RCU_TABLE_FREE
 *
 *  Like MMU_GATHER_TABLE_FREE, and adds semi-RCU semantics to the free (see
 *  comment below).
 *
 *  Useful if your architecture doesn't use IPIs for remote TLB invalidates
 *  and therefore doesn't naturally serialize with software page-table walkers.
 *
 *  MMU_GATHER_NO_FLUSH_CACHE
 *
 *  Indicates the architecture has flush_cache_range() but it does *NOT* need to
 *  be called before unmapping a VMA.
 *
 *  NOTE: strictly speaking we shouldn't have this knob and instead rely on
 *	  flush_cache_range() being a NOP, except Sparc64 seems to be
 *	  different here.
 *
 *  MMU_GATHER_MERGE_VMAS
 *
 *  Indicates the architecture wants to merge ranges over VMAs; typical when
 *  multiple range invalidates are more expensive than a full invalidate.
 *
 *  MMU_GATHER_NO_RANGE
 *
 *  Use this if your architecture lacks an efficient flush_tlb_range(). This
 *  option implies MMU_GATHER_MERGE_VMAS above.
 *
 *  MMU_GATHER_NO_GATHER
 *
 *  If this option is set, the mmu_gather will not track individual pages for
 *  delayed freeing. A platform that enables the option must provide its own
 *  implementation of __tlb_remove_page_size() to free pages.
 *
 *  This is useful if your architecture already flushes TLB entries in the
 *  various ptep_get_and_clear() functions.
 */
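
/*
 * Illustrative sketch of the calling pattern described above. It is not a
 * copy of any real caller (those live in mm/memory.c and mm/mmap.c) and the
 * surrounding page-table walk is elided; it only shows the intended ordering
 * of the mmu_gather primitives:
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm);		// start a gather for @mm
 *	tlb_start_vma(&tlb, vma);		// per-VMA setup (cache flush)
 *	...					// clear PTEs, and for each one:
 *	tlb_remove_tlb_entry(&tlb, pte, addr);	//   record the unmapped range
 *	tlb_remove_page(&tlb, page);		//   queue the page; may flush
 *	tlb_end_vma(&tlb, vma);			// flush at the VMA boundary
 *	tlb_finish_mmu(&tlb);			// final invalidate + free pages
 */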

#ifdef CONFIG_MMU_GATHER_TABLE_FREE

struct mmu_table_batch {
#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
	struct rcu_head		rcu;
#endif
	unsigned int		nr;
	void			*tables[];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#else /* !CONFIG_MMU_GATHER_TABLE_FREE */

/*
 * Without MMU_GATHER_TABLE_FREE the architecture is assumed to have page based
 * page directories and we can use the normal page batching to free them.
 */
#define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page))

#endif /* CONFIG_MMU_GATHER_TABLE_FREE */
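
/*
 * Hypothetical example of the arch side: an architecture whose pmd
 * directories are plain pages could wire __pmd_free_tlb() up to
 * tlb_remove_table() roughly as below. The macro body is illustrative and not
 * taken from any particular architecture; with MMU_GATHER_TABLE_FREE the
 * pointer ends up in __tlb_remove_table(), without it this degenerates to
 * tlb_remove_page():
 *
 *	#define __pmd_free_tlb(tlb, pmdp, addr)			\
 *		tlb_remove_table((tlb), virt_to_page(pmdp))
 */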

#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
/*
 * This allows an architecture that does not use the Linux page-tables for
 * hardware to skip the TLBI when freeing page tables.
 */
#ifndef tlb_needs_table_invalidate
#define tlb_needs_table_invalidate() (true)
#endif

#else

#ifdef tlb_needs_table_invalidate
#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
#endif

#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */


#ifndef CONFIG_MMU_GATHER_NO_GATHER
/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE	8

struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct page		*pages[];
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Limit the maximum number of mmu_gather batches to reduce the risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)

extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
				   int page_size);
#endif
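
/*
 * With MMU_GATHER_NO_GATHER the architecture must supply
 * __tlb_remove_page_size() itself. A minimal sketch, assuming the TLB entry
 * was already flushed by the ptep_get_and_clear() path (s390 does something
 * of this nature, but treat the body below as illustrative only):
 *
 *	bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
 *				    int page_size)
 *	{
 *		free_page_and_swap_cache(page);
 *		return false;	// never forces a tlb_flush_mmu()
 *	}
 */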

/*
 * struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch-specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;

#ifdef CONFIG_MMU_GATHER_TABLE_FREE
	struct mmu_table_batch	*batch;
#endif

	unsigned long		start;
	unsigned long		end;
	/*
	 * we are in the middle of an operation to clear
	 * a full mm and can make some optimizations
	 */
	unsigned int		fullmm : 1;

	/*
	 * we have performed an operation which
	 * requires a complete flush of the tlb
	 */
	unsigned int		need_flush_all : 1;

	/*
	 * we have removed page directories
	 */
	unsigned int		freed_tables : 1;

	/*
	 * at which levels have we cleared entries?
	 */
	unsigned int		cleared_ptes : 1;
	unsigned int		cleared_pmds : 1;
	unsigned int		cleared_puds : 1;
	unsigned int		cleared_p4ds : 1;

	/*
	 * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
	 */
	unsigned int		vma_exec : 1;
	unsigned int		vma_huge : 1;
	unsigned int		vma_pfn  : 1;

	unsigned int		batch_count;

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	struct mmu_gather_batch *active;
	struct mmu_gather_batch	local;
	struct page		*__pages[MMU_GATHER_BUNDLE];

#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	unsigned int page_size;
#endif
#endif
};

void tlb_flush_mmu(struct mmu_gather *tlb);

static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address,
				      unsigned int range_size)
{
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + range_size);
}

static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		tlb->start = tlb->end = ~0;
	} else {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
	tlb->freed_tables = 0;
	tlb->cleared_ptes = 0;
	tlb->cleared_pmds = 0;
	tlb->cleared_puds = 0;
	tlb->cleared_p4ds = 0;
	/*
	 * Do not reset mmu_gather::vma_* fields here, we do not
	 * call into tlb_start_vma() again to set them if there is an
	 * intermediate flush.
	 */
}

#ifdef CONFIG_MMU_GATHER_NO_RANGE

#if defined(tlb_flush)
#error MMU_GATHER_NO_RANGE relies on default tlb_flush()
#endif

/*
 * When an architecture does not have efficient means of range flushing TLBs
 * there is no point in doing intermediate flushes on tlb_end_vma() to keep the
 * range small. We equally don't have to worry about page granularity or other
 * things.
 *
 * All we need to do is issue a full flush for any !0 range.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->end)
		flush_tlb_mm(tlb->mm);
}

#else /* CONFIG_MMU_GATHER_NO_RANGE */

#ifndef tlb_flush
/*
 * When an architecture does not provide its own tlb_flush() implementation
 * but does have a reasonably efficient flush_tlb_range() implementation
 * use that.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->need_flush_all) {
		flush_tlb_mm(tlb->mm);
	} else if (tlb->end) {
		struct vm_area_struct vma = {
			.vm_mm = tlb->mm,
			.vm_flags = (tlb->vma_exec ? VM_EXEC    : 0) |
				    (tlb->vma_huge ? VM_HUGETLB : 0),
		};

		flush_tlb_range(&vma, tlb->start, tlb->end);
	}
}
#endif

#endif /* CONFIG_MMU_GATHER_NO_RANGE */

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	/*
	 * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
	 * mips-4k) flush only large pages.
	 *
	 * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
	 * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
	 * range.
	 *
	 * We rely on tlb_end_vma() to issue a flush, such that when we reset
	 * these values the batch is empty.
	 */
	tlb->vma_huge = is_vm_hugetlb_page(vma);
	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
	tlb->vma_pfn  = !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP));
}

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	/*
	 * Anything calling __tlb_adjust_range() also sets at least one of
	 * these bits.
	 */
	if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
	      tlb->cleared_puds || tlb->cleared_p4ds))
		return;

	tlb_flush(tlb);
	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
	__tlb_reset_range(tlb);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	if (__tlb_remove_page_size(tlb, page, page_size))
		tlb_flush_mmu(tlb);
}

static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

/* tlb_remove_page
 *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *	required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

static inline void tlb_change_page_size(struct mmu_gather *tlb,
						     unsigned int page_size)
{
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	if (tlb->page_size && tlb->page_size != page_size) {
		if (!tlb->fullmm && !tlb->need_flush_all)
			tlb_flush_mmu(tlb);
	}

	tlb->page_size = page_size;
#endif
}

static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
{
	if (tlb->cleared_ptes)
		return PAGE_SHIFT;
	if (tlb->cleared_pmds)
		return PMD_SHIFT;
	if (tlb->cleared_puds)
		return PUD_SHIFT;
	if (tlb->cleared_p4ds)
		return P4D_SHIFT;

	return PAGE_SHIFT;
}

static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
	return 1UL << tlb_get_unmap_shift(tlb);
}
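
/*
 * Sketch of how an architecture-provided tlb_flush() might consume this
 * state. arm64 does something along these lines; the helper name
 * arch_flush_tlb_range_stride() below is hypothetical and only illustrates
 * using tlb_get_unmap_size() as the invalidation stride:
 *
 *	static inline void tlb_flush(struct mmu_gather *tlb)
 *	{
 *		unsigned long stride = tlb_get_unmap_size(tlb);
 *
 *		if (tlb->fullmm || tlb->need_flush_all)
 *			flush_tlb_mm(tlb->mm);
 *		else if (tlb->end)
 *			arch_flush_tlb_range_stride(tlb->mm, tlb->start,
 *						    tlb->end, stride);
 *	}
 */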

/*
 * In the case of tlb vma handling, we can optimise these away when we're
 * doing a full MM flush.  When we're doing a munmap, the vmas are adjusted
 * to only cover the region to be torn down.
 */
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	tlb_update_vma_flags(tlb, vma);
#ifndef CONFIG_MMU_GATHER_NO_FLUSH_CACHE
	flush_cache_range(vma, vma->vm_start, vma->vm_end);
#endif
}

static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	/*
	 * VM_PFNMAP is more fragile because the core mm will not track the
	 * page mapcount -- there might not be page-frames for these PFNs after
	 * all. Force flush TLBs for such ranges to avoid munmap() vs
	 * unmap_mapping_range() races.
	 */
	if (tlb->vma_pfn || !IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS)) {
		/*
		 * Do a TLB flush and reset the range at VMA boundaries; this avoids
		 * the ranges growing with the unused space between consecutive VMAs.
		 */
		tlb_flush_mmu_tlbonly(tlb);
	}
}

/*
 * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end,
 * and set corresponding cleared_*.
 */
static inline void tlb_flush_pte_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_ptes = 1;
}

static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_pmds = 1;
}

static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_puds = 1;
}

static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_p4ds = 1;
}

#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that PTEs were really unmapped by updating the range,
 * so we can later optimise away the tlb invalidate.   This helps when
 * userspace is unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb_flush_pte_range(tlb, address, PAGE_SIZE);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
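
/*
 * Simplified illustration of the calling convention in a PTE unmap loop; the
 * real user (zap_pte_range() in mm/memory.c) carries considerably more state,
 * so treat this only as a sketch of how the pieces fit together:
 *
 *	ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
 *	tlb_remove_tlb_entry(tlb, pte, addr);	// adjust range + arch hook
 *	if (__tlb_remove_page(tlb, page))	// queue page for deferred free
 *		tlb_flush_mmu(tlb);		// batch full: flush and free now
 */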

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	do {							\
		unsigned long _sz = huge_page_size(h);		\
		if (_sz >= P4D_SIZE)				\
			tlb_flush_p4d_range(tlb, address, _sz);	\
		else if (_sz >= PUD_SIZE)			\
			tlb_flush_pud_range(tlb, address, _sz);	\
		else if (_sz >= PMD_SIZE)			\
			tlb_flush_pmd_range(tlb, address, _sz);	\
		else						\
			tlb_flush_pte_range(tlb, address, _sz);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
 * This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
	do {								\
		tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE);	\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
	} while (0)

/**
 * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
 * invalidation. This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pud_tlb_entry
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
#endif

#define tlb_remove_pud_tlb_entry(tlb, pudp, address)			\
	do {								\
607
		tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE);	\
608 609 610
		__tlb_remove_pud_tlb_entry(tlb, pudp, address);		\
	} while (0)

/*
 * For things like page table caches (i.e. caching addresses "inside" the
 * page tables, like x86 does), for legacy reasons, flushing an
 * individual page had better flush the page table caches behind it. This
 * is definitely how x86 works, for example. And if you have an
 * architected non-legacy page table cache (which I'm not aware of
 * anybody actually doing), you're going to have some architecturally
 * explicit flushing for that, likely *separate* from a regular TLB entry
 * flush, and thus you'd need more than just some range expansion..
 *
 * So if we ever find an architecture
 * that would want something that odd, I think it is up to that
 * architecture to do its own odd thing, not cause pain for others
 * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
 *
 * For now w.r.t page table cache, mark the range_size as PAGE_SIZE
 */

#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		tlb_flush_pmd_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)
#endif

#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		tlb_flush_pud_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)
#endif

#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		tlb_flush_p4d_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__p4d_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#ifndef pte_needs_flush
static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
{
	return true;
}
#endif

#ifndef huge_pmd_needs_flush
static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
{
	return true;
}
#endif

#endif /* CONFIG_MMU */

#endif /* _ASM_GENERIC__TLB_H */