/* SPDX-License-Identifier: GPL-2.0-or-later */
/* include/asm-generic/tlb.h
 *
 *	Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/hugetlb_inline.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/*
 * Blindly accessing user memory from NMI context can be dangerous
 * if we're in the middle of switching the current user task or switching
 * the loaded mm.
 */
#ifndef nmi_uaccess_okay
# define nmi_uaccess_okay() true
#endif
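/*
 * Illustrative caller sketch (hypothetical, not part of this header): code
 * that touches user memory from NMI context, such as x86's
 * copy_from_user_nmi(), is expected to check this first and bail out of the
 * access when it returns false, e.g.:
 *
 *	if (!nmi_uaccess_okay())
 *		return ret;	(with "ret" a placeholder failure value)
 */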

#ifdef CONFIG_MMU

/*
 * Generic MMU-gather implementation.
 *
 * The mmu_gather data structure is used by the mm code to implement the
 * correct and efficient ordering of freeing pages and TLB invalidations.
 *
 * This correct ordering is:
 *
 *  1) unhook page
 *  2) TLB invalidate page
 *  3) free page
 *
 * That is, we must never free a page before we have ensured there are no live
 * translations left to it. Otherwise it might be possible to observe (or
 * worse, change) the page content after it has been reused.
 *
 * The mmu_gather API consists of:
 *
 *  - tlb_gather_mmu() / tlb_gather_mmu_fullmm() / tlb_finish_mmu()
 *
 *    start and finish a mmu_gather
 *
 *    Finish in particular will issue a (final) TLB invalidate and free
 *    all (remaining) queued pages.
 *
 *  - tlb_start_vma() / tlb_end_vma(); marks the start / end of a VMA
 *
 *    Defaults to flushing at tlb_end_vma() to reset the range; helps when
 *    there are large holes between the VMAs.
 *
 *  - tlb_remove_table()
 *
 *    tlb_remove_table() is the basic primitive to free page-table directories
 *    (__p*_free_tlb()).  In its most primitive form it is an alias for
 *    tlb_remove_page() below, for when page directories are pages and have no
 *    additional constraints.
 *
 *    See also MMU_GATHER_TABLE_FREE and MMU_GATHER_RCU_TABLE_FREE.
 *
 *  - tlb_remove_page() / __tlb_remove_page()
 *  - tlb_remove_page_size() / __tlb_remove_page_size()
 *
 *    __tlb_remove_page_size() is the basic primitive that queues a page for
 *    freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a
 *    boolean indicating if the queue is (now) full and a call to
 *    tlb_flush_mmu() is required.
 *
 *    tlb_remove_page() and tlb_remove_page_size() imply the call to
 *    tlb_flush_mmu() when required and have no return value.
 *
 *  - tlb_change_page_size()
 *
 *    call before __tlb_remove_page*() to set the current page-size; implies a
 *    possible tlb_flush_mmu() call.
 *
 *  - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
 *
 *    tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
 *                              related state, like the range)
 *
 *    tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
 *			whatever pages are still batched.
 *
 *  - mmu_gather::fullmm
 *
 *    A flag set by tlb_gather_mmu_fullmm() to indicate we're going to free
 *    the entire mm; this allows a number of optimizations.
 *
 *    - We can ignore tlb_{start,end}_vma(); because we don't
 *      care about ranges. Everything will be shot down.
 *
 *    - (RISC) architectures that use ASIDs can cycle to a new ASID
 *      and delay the invalidation until ASID space runs out.
 *
 *  - mmu_gather::need_flush_all
 *
 *    A flag that can be set by the arch code if it wants to force
 *    flush the entire TLB irrespective of the range. For instance
 *    x86-PAE needs this when changing top-level entries.
 *
 * And allows the architecture to provide and implement tlb_flush():
 *
 * tlb_flush() may, in addition to the above mentioned mmu_gather fields, make
 * use of:
 *
 *  - mmu_gather::start / mmu_gather::end
 *
 *    which provides the range that needs to be flushed to cover the pages to
 *    be freed.
 *
 *  - mmu_gather::freed_tables
 *
 *    set when we freed page table pages
 *
 *  - tlb_get_unmap_shift() / tlb_get_unmap_size()
 *
 *    returns the smallest TLB entry size unmapped in this range.
 *
 * If an architecture does not provide tlb_flush() a default implementation
 * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is
 * specified, in which case we'll default to flush_tlb_mm().
 *
 * Additionally there are a few opt-in features:
 *
 *  MMU_GATHER_PAGE_SIZE
 *
 *  This ensures we call tlb_flush() every time tlb_change_page_size() actually
 *  changes the size and provides mmu_gather::page_size to tlb_flush().
 *
 *  This might be useful if your architecture has size-specific TLB
 *  invalidation instructions.
 *
 *  MMU_GATHER_TABLE_FREE
 *
 *  This provides tlb_remove_table(), to be used instead of tlb_remove_page()
 *  for page directories (__p*_free_tlb()).
 *
 *  Useful if your architecture has non-page page directories.
 *
 *  When used, an architecture is expected to provide __tlb_remove_table()
 *  which does the actual freeing of these pages.
 *
 *  MMU_GATHER_RCU_TABLE_FREE
 *
 *  Like MMU_GATHER_TABLE_FREE, and adds semi-RCU semantics to the free (see
 *  comment below).
 *
 *  Useful if your architecture doesn't use IPIs for remote TLB invalidates
 *  and therefore doesn't naturally serialize with software page-table walkers.
 *
 *  MMU_GATHER_NO_RANGE
 *
 *  Use this if your architecture lacks an efficient flush_tlb_range().
 *
 *  MMU_GATHER_NO_GATHER
 *
 *  If the option is set, the mmu_gather will not track individual pages for
 *  delayed page free anymore. A platform that enables the option needs to
 *  provide its own implementation of the __tlb_remove_page_size() function to
 *  free pages.
 *
 *  This is useful if your architecture already flushes TLB entries in the
 *  various ptep_get_and_clear() functions.
 */
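/*
 * Illustrative usage sketch (hypothetical caller, loosely following the
 * generic unmap path in mm/memory.c; condensed pseudocode, not a verbatim
 * example):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm);
 *	tlb_start_vma(&tlb, vma);
 *	tlb_change_page_size(&tlb, PAGE_SIZE);
 *	for each present pte in [start, end):
 *		ptent = ptep_get_and_clear_full(mm, addr, pte, tlb.fullmm);
 *		tlb_remove_tlb_entry(&tlb, pte, addr);
 *		tlb_remove_page(&tlb, pte_page(ptent));
 *	tlb_end_vma(&tlb, vma);
 *	tlb_finish_mmu(&tlb);
 *
 * tlb_remove_tlb_entry() grows the range to invalidate, tlb_remove_page()
 * queues the page, and tlb_finish_mmu() issues the final TLB invalidate and
 * frees whatever is still queued, preserving the unhook -> invalidate ->
 * free ordering described above.
 */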

#ifdef CONFIG_MMU_GATHER_TABLE_FREE

struct mmu_table_batch {
#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
	struct rcu_head		rcu;
#endif
	unsigned int		nr;
	void			*tables[];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#else /* !CONFIG_MMU_GATHER_TABLE_FREE */

/*
 * Without MMU_GATHER_TABLE_FREE the architecture is assumed to have page based
 * page directories and we can use the normal page batching to free them.
 */
#define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page))

#endif /* CONFIG_MMU_GATHER_TABLE_FREE */
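/*
 * Illustrative sketch (hypothetical arch code, not part of this header): an
 * architecture whose PTE tables are plain pages might wire its freeing hook
 * up to tlb_remove_table() roughly as follows, assuming the generic
 * pgtable_pte_page_dtor() helper:
 *
 *	#define __pte_free_tlb(tlb, pte, address)		\
 *		do {						\
 *			pgtable_pte_page_dtor(pte);		\
 *			tlb_remove_table((tlb), (pte));		\
 *		} while (0)
 */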

#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
/*
 * This allows an architecture that does not use the Linux page-tables for
 * hardware to skip the TLBI when freeing page tables.
 */
#ifndef tlb_needs_table_invalidate
#define tlb_needs_table_invalidate() (true)
#endif

#else

#ifdef tlb_needs_table_invalidate
#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
#endif

#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */


#ifndef CONFIG_MMU_GATHER_NO_GATHER
/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE	8

struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct page		*pages[];
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Limit the maximum number of mmu_gather batches to reduce a risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
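/*
 * Rough arithmetic (assuming 4K pages and 64-bit pointers): one batch holds
 * about (4096 - 16) / 8 = 510 page pointers, so the cap below works out to
 * roughly 19 batches, i.e. a little under 10K queued pages.
 */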
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)

extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
				   int page_size);
#endif

/*
 * struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;

#ifdef CONFIG_MMU_GATHER_TABLE_FREE
	struct mmu_table_batch	*batch;
#endif

	unsigned long		start;
	unsigned long		end;
	/*
	 * we are in the middle of an operation to clear
	 * a full mm and can make some optimizations
	 */
	unsigned int		fullmm : 1;

	/*
	 * we have performed an operation which
	 * requires a complete flush of the tlb
	 */
	unsigned int		need_flush_all : 1;

	/*
	 * we have removed page directories
	 */
	unsigned int		freed_tables : 1;

	/*
	 * at which levels have we cleared entries?
	 */
	unsigned int		cleared_ptes : 1;
	unsigned int		cleared_pmds : 1;
	unsigned int		cleared_puds : 1;
	unsigned int		cleared_p4ds : 1;

	/*
	 * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
	 */
	unsigned int		vma_exec : 1;
	unsigned int		vma_huge : 1;

	unsigned int		batch_count;

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	struct mmu_gather_batch *active;
	struct mmu_gather_batch	local;
	struct page		*__pages[MMU_GATHER_BUNDLE];

#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	unsigned int page_size;
#endif
#endif
};

void tlb_flush_mmu(struct mmu_gather *tlb);

static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address,
				      unsigned int range_size)
{
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + range_size);
}

static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		tlb->start = tlb->end = ~0;
	} else {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
	tlb->freed_tables = 0;
	tlb->cleared_ptes = 0;
	tlb->cleared_pmds = 0;
	tlb->cleared_puds = 0;
	tlb->cleared_p4ds = 0;
	/*
	 * Do not reset mmu_gather::vma_* fields here, we do not
	 * call into tlb_start_vma() again to set them if there is an
	 * intermediate flush.
	 */
}

#ifdef CONFIG_MMU_GATHER_NO_RANGE

#if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma)
#error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma()
#endif

/*
 * When an architecture does not have an efficient means of flushing a range
 * of TLB entries, there is no point in doing intermediate flushes on
 * tlb_end_vma() to keep the range small. We equally don't have to worry
 * about page granularity or other things.
 *
 * All we need to do is issue a full flush for any !0 range.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->end)
		flush_tlb_mm(tlb->mm);
}

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#define tlb_end_vma tlb_end_vma
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#else /* CONFIG_MMU_GATHER_NO_RANGE */

#ifndef tlb_flush

#if defined(tlb_start_vma) || defined(tlb_end_vma)
#error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma()
#endif

/*
 * When an architecture does not provide its own tlb_flush() implementation
 * but does have a reasonably efficient flush_tlb_range() implementation,
 * use that.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->need_flush_all) {
		flush_tlb_mm(tlb->mm);
	} else if (tlb->end) {
		struct vm_area_struct vma = {
			.vm_mm = tlb->mm,
			.vm_flags = (tlb->vma_exec ? VM_EXEC    : 0) |
				    (tlb->vma_huge ? VM_HUGETLB : 0),
		};

		flush_tlb_range(&vma, tlb->start, tlb->end);
	}
}

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	/*
	 * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
	 * mips-4k) flush only large pages.
	 *
	 * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
	 * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
	 * range.
	 *
	 * We rely on tlb_end_vma() to issue a flush, such that when we reset
	 * these values the batch is empty.
	 */
	tlb->vma_huge = is_vm_hugetlb_page(vma);
	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
}

#else

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#endif

#endif /* CONFIG_MMU_GATHER_NO_RANGE */

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	/*
	 * Anything calling __tlb_adjust_range() also sets at least one of
	 * these bits.
	 */
	if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
	      tlb->cleared_puds || tlb->cleared_p4ds))
		return;

	tlb_flush(tlb);
	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
	__tlb_reset_range(tlb);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	if (__tlb_remove_page_size(tlb, page, page_size))
		tlb_flush_mmu(tlb);
}

static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

/* tlb_remove_page
 *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *	required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

static inline void tlb_change_page_size(struct mmu_gather *tlb,
						     unsigned int page_size)
{
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	if (tlb->page_size && tlb->page_size != page_size) {
		if (!tlb->fullmm && !tlb->need_flush_all)
			tlb_flush_mmu(tlb);
	}

	tlb->page_size = page_size;
#endif
}

static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
{
	if (tlb->cleared_ptes)
		return PAGE_SHIFT;
	if (tlb->cleared_pmds)
		return PMD_SHIFT;
	if (tlb->cleared_puds)
		return PUD_SHIFT;
	if (tlb->cleared_p4ds)
		return P4D_SHIFT;

	return PAGE_SHIFT;
}

static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
	return 1UL << tlb_get_unmap_shift(tlb);
}
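/*
 * Illustrative sketch (hypothetical arch code, not part of this header): an
 * architecture with size-specific TLB invalidation instructions could use
 * the unmap granule as the stride of its range invalidation, e.g.:
 *
 *	static inline void tlb_flush(struct mmu_gather *tlb)
 *	{
 *		unsigned long stride = tlb_get_unmap_size(tlb);
 *
 *		my_arch_flush_tlb_range(tlb->mm, tlb->start, tlb->end, stride);
 *	}
 *
 * where my_arch_flush_tlb_range() is a made-up stand-in for the
 * architecture's own primitive (arm64's tlb_flush() uses the unmap size as
 * its flush stride in much the same way).
 */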

/*
 * In the case of tlb vma handling, we can optimise these away when we're
 * doing a full MM flush.  When we're doing a munmap, the vmas are adjusted
 * to only cover the region to be torn down.
 */
#ifndef tlb_start_vma
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	tlb_update_vma_flags(tlb, vma);
	flush_cache_range(vma, vma->vm_start, vma->vm_end);
}
#endif

#ifndef tlb_end_vma
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	/*
	 * Do a TLB flush and reset the range at VMA boundaries; this avoids
	 * the ranges growing with the unused space between consecutive VMAs,
	 * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on
	 * this.
	 */
	tlb_flush_mmu_tlbonly(tlb);
}
#endif

/*
 * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end,
 * and set corresponding cleared_*.
 */
static inline void tlb_flush_pte_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_ptes = 1;
}

static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_pmds = 1;
}

static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_puds = 1;
}

static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_p4ds = 1;
}

#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that PTEs were really unmapped by updating the range,
 * so we can later optimise away the TLB invalidate. This helps when
 * userspace is unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb_flush_pte_range(tlb, address, PAGE_SIZE);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	do {							\
		unsigned long _sz = huge_page_size(h);		\
		if (_sz >= P4D_SIZE)				\
			tlb_flush_p4d_range(tlb, address, _sz);	\
		else if (_sz >= PUD_SIZE)			\
			tlb_flush_pud_range(tlb, address, _sz);	\
		else if (_sz >= PMD_SIZE)			\
			tlb_flush_pmd_range(tlb, address, _sz);	\
		else						\
			tlb_flush_pte_range(tlb, address, _sz);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
 * This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
	do {								\
		tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE);	\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
	} while (0)

/**
 * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
 * invalidation. This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pud_tlb_entry
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
#endif

#define tlb_remove_pud_tlb_entry(tlb, pudp, address)			\
	do {								\
		tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE);	\
		__tlb_remove_pud_tlb_entry(tlb, pudp, address);		\
	} while (0)

/*
 * For things like page tables caches (ie caching addresses "inside" the
 * page tables, like x86 does), for legacy reasons, flushing an
 * individual page had better flush the page table caches behind it. This
 * is definitely how x86 works, for example. And if you have an
 * architected non-legacy page table cache (which I'm not aware of
 * anybody actually doing), you're going to have some architecturally
 * explicit flushing for that, likely *separate* from a regular TLB entry
 * flush, and thus you'd need more than just some range expansion..
 *
 * So if we ever find an architecture
 * that would want something that odd, I think it is up to that
 * architecture to do its own odd thing, not cause pain for others
 * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
 *
 * For now w.r.t page table cache, mark the range_size as PAGE_SIZE
 */

#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		tlb_flush_pmd_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)
#endif

#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		tlb_flush_pud_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)
#endif

#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		tlb_flush_p4d_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__p4d_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#ifndef pte_needs_flush
static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
{
	return true;
}
#endif

#ifndef huge_pmd_needs_flush
static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
{
	return true;
}
#endif

#endif /* CONFIG_MMU */

#endif /* _ASM_GENERIC__TLB_H */