/* include/asm-generic/tlb.h
 *
 *	Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#ifdef CONFIG_MMU

/*
 * Generic MMU-gather implementation.
 *
 * The mmu_gather data structure is used by the mm code to implement the
 * correct and efficient ordering of freeing pages and TLB invalidations.
 *
 * This correct ordering is:
 *
 *  1) unhook page
 *  2) TLB invalidate page
 *  3) free page
 *
 * That is, we must never free a page before we have ensured there are no live
 * translations left to it. Otherwise it might be possible to observe (or
 * worse, change) the page content after it has been reused.
 *
 * The mmu_gather API consists of:
 *
 *  - tlb_gather_mmu() / tlb_finish_mmu(); start and finish a mmu_gather
 *
 *    Finish in particular will issue a (final) TLB invalidate and free
 *    all (remaining) queued pages.
 *
 *  - tlb_start_vma() / tlb_end_vma(); marks the start / end of a VMA
 *
 *    Defaults to flushing at tlb_end_vma() to reset the range; helps when
 *    there are large holes between the VMAs.
 *
 *  - tlb_remove_page() / __tlb_remove_page()
 *  - tlb_remove_page_size() / __tlb_remove_page_size()
 *
 *    __tlb_remove_page_size() is the basic primitive that queues a page for
 *    freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a
 *    boolean indicating if the queue is (now) full and a call to
 *    tlb_flush_mmu() is required.
 *
 *    tlb_remove_page() and tlb_remove_page_size() imply the call to
 *    tlb_flush_mmu() when required and have no return value.
 *
 *  - tlb_change_page_size()
 *
 *    call before __tlb_remove_page*() to set the current page-size; implies a
 *    possible tlb_flush_mmu() call.
 *
 *  - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
 *
 *    tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
 *                              related state, like the range)
 *
 *    tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
 *			whatever pages are still batched.
 *
 *  - mmu_gather::fullmm
 *
 *    A flag set by tlb_gather_mmu() to indicate we're going to free
 *    the entire mm; this allows a number of optimizations.
 *
 *    - We can ignore tlb_{start,end}_vma(); because we don't
 *      care about ranges. Everything will be shot down.
 *
 *    - (RISC) architectures that use ASIDs can cycle to a new ASID
 *      and delay the invalidation until ASID space runs out.
 *
 *  - mmu_gather::need_flush_all
 *
 *    A flag that can be set by the arch code if it wants to force
 *    flush the entire TLB irrespective of the range. For instance
 *    x86-PAE needs this when changing top-level entries.
 *
 * The API also allows the architecture to provide and implement tlb_flush():
 *
 * tlb_flush() may, in addition to the above mentioned mmu_gather fields, make
 * use of:
 *
 *  - mmu_gather::start / mmu_gather::end
 *
 *    which provides the range that needs to be flushed to cover the pages to
 *    be freed.
 *
 *  - mmu_gather::freed_tables
 *
 *    set when we freed page table pages
 *
 *  - tlb_get_unmap_shift() / tlb_get_unmap_size()
 *
 *    return the smallest TLB entry size unmapped in this range.
 *
 * If an architecture does not provide tlb_flush(), a default implementation
 * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is
 * specified, in which case we'll default to flush_tlb_mm().
 *
 * Additionally there are a few opt-in features:
 *
 *  HAVE_MMU_GATHER_PAGE_SIZE
 *
 *  This ensures we call tlb_flush() every time tlb_change_page_size() actually
 *  changes the size and provides mmu_gather::page_size to tlb_flush().
 *
 *  HAVE_RCU_TABLE_FREE
 *
 *  This provides tlb_remove_table(), to be used instead of tlb_remove_page()
 *  for page directories (__p*_free_tlb()). This provides separate freeing of
 *  the page-table pages themselves in a semi-RCU fashion (see comment below).
 *  Useful if your architecture doesn't use IPIs for remote TLB invalidates
 *  and therefore doesn't naturally serialize with software page-table walkers.
 *
 *  When used, an architecture is expected to provide __tlb_remove_table()
 *  which does the actual freeing of these pages.
 *
 *  HAVE_RCU_TABLE_NO_INVALIDATE
 *
 *  This makes HAVE_RCU_TABLE_FREE avoid calling tlb_flush_mmu_tlbonly() before
 *  freeing the page-table pages. That invalidate can only be skipped if your
 *  architecture uses HAVE_RCU_TABLE_FREE and does _NOT_ use the Linux
 *  page-tables natively.
 *
 *  MMU_GATHER_NO_RANGE
 *
 *  Use this if your architecture lacks an efficient flush_tlb_range().
 */
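
/*
 * Illustrative sketch only (not part of this header, not compiled): roughly
 * how a caller is expected to drive the API documented above.  The function
 * name and the PTE/page iteration are hypothetical placeholders; the real
 * callers live in mm/memory.c and mm/mmap.c.
 */
#if 0
static void example_unmap_vma(struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, vma->vm_mm, start, end);	/* start a gather over [start, end) */
	tlb_start_vma(&tlb, vma);			/* per-VMA setup (cache flush, flags) */

	/*
	 * For each present pte in [start, end):
	 *   - clear the pte (unhook the page),
	 *   - record it:       tlb_remove_tlb_entry(&tlb, pte, addr);
	 *   - queue the page:  tlb_remove_page(&tlb, page);
	 * tlb_remove_page() issues tlb_flush_mmu() itself whenever a batch
	 * fills up, so a page is never freed while stale translations remain.
	 */

	tlb_end_vma(&tlb, vma);				/* flush + reset at the VMA boundary */
	tlb_finish_mmu(&tlb, start, end);		/* final flush, free the queued pages */
}
#endif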

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
/*
 * Semi RCU freeing of the page directories.
 *
 * This is needed by some architectures to implement software pagetable walkers.
 *
 * gup_fast() and other software pagetable walkers do a lockless page-table
 * walk and therefore need some synchronization with the freeing of the page
 * directories. The chosen means to accomplish that is by disabling IRQs over
 * the walk.
 *
 * Architectures that use IPIs to flush TLBs will then automagically DTRT,
 * since we unlink the page, flush TLBs, free the page. Since the disabling of
 * IRQs delays the completion of the TLB flush we can never observe an already
 * freed page.
 *
 * Architectures that do not have this (PPC) need to delay the freeing by some
 * other means; this is that means.
 *
 * What we do is batch the freed directory pages (tables) and RCU free them.
 * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
 * holds off grace periods.
 *
 * However, in order to batch these pages we need to allocate storage, this
 * allocation is deep inside the MM code and can thus easily fail on memory
 * pressure. To guarantee progress we fall back to single table freeing, see
 * the implementation of tlb_remove_table_one().
 *
 */
struct mmu_table_batch {
	struct rcu_head		rcu;
	unsigned int		nr;
	void			*tables[0];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#endif
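
/*
 * Illustrative sketch (not compiled): an architecture selecting
 * HAVE_RCU_TABLE_FREE is expected to provide __tlb_remove_table(), which is
 * called only once it is finally safe to free the page-table page.  A
 * minimal implementation, along the lines of what some architectures
 * (e.g. arm64) use, could simply hand the page back:
 */
#if 0
void __tlb_remove_table(void *table)
{
	free_page_and_swap_cache((struct page *)table);
}
#endif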

#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE	8

struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct page		*pages[0];
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Limit the maximum number of mmu_gather batches to reduce the risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)
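
/*
 * Worked example (illustrative, assuming 64-bit pointers and 4K pages):
 * sizeof(struct mmu_gather_batch) is 16 bytes, so MAX_GATHER_BATCH is
 * (4096 - 16) / 8 = 510 page pointers per batch and MAX_GATHER_BATCH_COUNT
 * is 10000 / 510 = 19 batches; a gather can therefore queue roughly
 * 19 * 510 = 9690 pages before the caller is forced into tlb_flush_mmu().
 */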

extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
				   int page_size);
#endif

/*
 * struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	struct mmu_table_batch	*batch;
#endif

	unsigned long		start;
	unsigned long		end;
	/*
	 * we are in the middle of an operation to clear
	 * a full mm and can make some optimizations
	 */
	unsigned int		fullmm : 1;

	/*
	 * we have performed an operation which
	 * requires a complete flush of the tlb
	 */
	unsigned int		need_flush_all : 1;

	/*
	 * we have removed page directories
	 */
	unsigned int		freed_tables : 1;

	/*
	 * at which levels have we cleared entries?
	 */
	unsigned int		cleared_ptes : 1;
	unsigned int		cleared_pmds : 1;
	unsigned int		cleared_puds : 1;
	unsigned int		cleared_p4ds : 1;

	/*
	 * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
	 */
	unsigned int		vma_exec : 1;
	unsigned int		vma_huge : 1;

	unsigned int		batch_count;

#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
	struct mmu_gather_batch *active;
	struct mmu_gather_batch	local;
	struct page		*__pages[MMU_GATHER_BUNDLE];

#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
	unsigned int page_size;
#endif
#endif
};

void arch_tlb_gather_mmu(struct mmu_gather *tlb,
	struct mm_struct *mm, unsigned long start, unsigned long end);
void tlb_flush_mmu(struct mmu_gather *tlb);
void arch_tlb_finish_mmu(struct mmu_gather *tlb,
			 unsigned long start, unsigned long end, bool force);

static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address,
				      unsigned int range_size)
{
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + range_size);
}

static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		tlb->start = tlb->end = ~0;
	} else {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
	tlb->freed_tables = 0;
	tlb->cleared_ptes = 0;
	tlb->cleared_pmds = 0;
	tlb->cleared_puds = 0;
	tlb->cleared_p4ds = 0;
	/*
	 * Do not reset mmu_gather::vma_* fields here, we do not
	 * call into tlb_start_vma() again to set them if there is an
	 * intermediate flush.
	 */
}
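
/*
 * Worked example (illustrative, assuming 4K pages and 2M PMDs): after
 * __tlb_reset_range() on a !fullmm gather the range is empty
 * (start = TASK_SIZE, end = 0).  Clearing a PTE at 0x1000 and a PMD-sized
 * entry at 0x400000 then accumulates:
 *
 *	__tlb_adjust_range(tlb, 0x1000, PAGE_SIZE);	// start=0x1000, end=0x2000
 *	__tlb_adjust_range(tlb, 0x400000, PMD_SIZE);	// start=0x1000, end=0x600000
 *
 * so a single flush of [start, end) covers everything that was unmapped.
 */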

#ifdef CONFIG_MMU_GATHER_NO_RANGE

#if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma)
#error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma()
#endif

/*
 * When an architecture does not have efficient means of range flushing TLBs
 * there is no point in doing intermediate flushes on tlb_end_vma() to keep the
 * range small. We equally don't have to worry about page granularity or other
 * things.
 *
 * All we need to do is issue a full flush for any !0 range.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->end)
		flush_tlb_mm(tlb->mm);
}

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#define tlb_end_vma tlb_end_vma
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#else /* CONFIG_MMU_GATHER_NO_RANGE */

#ifndef tlb_flush

#if defined(tlb_start_vma) || defined(tlb_end_vma)
#error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma()
#endif

/*
 * When an architecture does not provide its own tlb_flush() implementation
 * but does have a reasonably efficient flush_tlb_range() implementation,
 * use that.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->need_flush_all) {
		flush_tlb_mm(tlb->mm);
	} else if (tlb->end) {
		struct vm_area_struct vma = {
			.vm_mm = tlb->mm,
			.vm_flags = (tlb->vma_exec ? VM_EXEC    : 0) |
				    (tlb->vma_huge ? VM_HUGETLB : 0),
		};

		flush_tlb_range(&vma, tlb->start, tlb->end);
	}
}

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	/*
	 * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
	 * mips-4k) flush only large pages.
	 *
	 * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
	 * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
	 * range.
	 *
	 * We rely on tlb_end_vma() to issue a flush, such that when we reset
	 * these values the batch is empty.
	 */
	tlb->vma_huge = !!(vma->vm_flags & VM_HUGETLB);
	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
}

#else

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#endif

#endif /* CONFIG_MMU_GATHER_NO_RANGE */

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	if (!tlb->end)
		return;

	tlb_flush(tlb);
	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
	__tlb_reset_range(tlb);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	if (__tlb_remove_page_size(tlb, page, page_size))
		tlb_flush_mmu(tlb);
}

static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

/* tlb_remove_page
 *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *	required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

static inline void tlb_change_page_size(struct mmu_gather *tlb,
					unsigned int page_size)
{
#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
	if (tlb->page_size && tlb->page_size != page_size) {
		if (!tlb->fullmm)
			tlb_flush_mmu(tlb);
	}

	tlb->page_size = page_size;
#endif
}
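
/*
 * Illustrative sketch (not compiled): a hypothetical caller that zaps a
 * PMD-sized huge page followed by a regular page announces each size first,
 * so that a HAVE_MMU_GATHER_PAGE_SIZE architecture sees a flush between the
 * differently sized batches.  HPAGE_PMD_SIZE comes from <linux/huge_mm.h>.
 */
#if 0
static void example_queue_mixed_sizes(struct mmu_gather *tlb,
				      struct page *huge, struct page *small)
{
	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
	if (__tlb_remove_page_size(tlb, huge, HPAGE_PMD_SIZE))
		tlb_flush_mmu(tlb);

	tlb_change_page_size(tlb, PAGE_SIZE);
	if (__tlb_remove_page(tlb, small))
		tlb_flush_mmu(tlb);
}
#endif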

static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
{
	if (tlb->cleared_ptes)
		return PAGE_SHIFT;
	if (tlb->cleared_pmds)
		return PMD_SHIFT;
	if (tlb->cleared_puds)
		return PUD_SHIFT;
	if (tlb->cleared_p4ds)
		return P4D_SHIFT;

	return PAGE_SHIFT;
}

static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
	return 1UL << tlb_get_unmap_shift(tlb);
}
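
/*
 * Illustrative sketch (not compiled): an architecture-provided tlb_flush()
 * can use the cleared_* bits, via tlb_get_unmap_size(), to walk the range
 * at the granule that was actually unmapped instead of always assuming
 * PAGE_SIZE.  invalidate_one_entry() is a hypothetical per-entry
 * invalidation primitive, not a real kernel interface.
 */
#if 0
static inline void tlb_flush(struct mmu_gather *tlb)
{
	unsigned long stride = tlb_get_unmap_size(tlb);
	unsigned long addr;

	if (tlb->fullmm || tlb->need_flush_all) {
		flush_tlb_mm(tlb->mm);
		return;
	}

	for (addr = tlb->start; addr < tlb->end; addr += stride)
		invalidate_one_entry(tlb->mm, addr);
}
#endif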

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush.  When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
#ifndef tlb_start_vma
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	tlb_update_vma_flags(tlb, vma);
	flush_cache_range(vma, vma->vm_start, vma->vm_end);
}
#endif

#ifndef tlb_end_vma
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	/*
	 * Do a TLB flush and reset the range at VMA boundaries; this avoids
	 * the ranges growing with the unused space between consecutive VMAs,
	 * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on
	 * this.
	 */
	tlb_flush_mmu_tlbonly(tlb);
}
#endif

#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that pte's were really unmapped by updating the range,
 * so we can later optimise away the tlb invalidate. This helps when
 * userspace is unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->cleared_ptes = 1;				\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	do {							\
		unsigned long _sz = huge_page_size(h);		\
		__tlb_adjust_range(tlb, address, _sz);		\
		if (_sz == PMD_SIZE)				\
			tlb->cleared_pmds = 1;			\
		else if (_sz == PUD_SIZE)			\
			tlb->cleared_puds = 1;			\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
 * This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
	do {								\
		__tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE);	\
		tlb->cleared_pmds = 1;					\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
	} while (0)

/**
 * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
 * invalidation. This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pud_tlb_entry
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
#endif

#define tlb_remove_pud_tlb_entry(tlb, pudp, address)			\
	do {								\
		__tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE);	\
540
		tlb->cleared_puds = 1;					\
541 542 543
		__tlb_remove_pud_tlb_entry(tlb, pudp, address);		\
	} while (0)

/*
 * For things like page table caches (i.e. caching addresses "inside" the
 * page tables, like x86 does), for legacy reasons, flushing an
 * individual page had better flush the page table caches behind it. This
 * is definitely how x86 works, for example. And if you have an
 * architected non-legacy page table cache (which I'm not aware of
 * anybody actually doing), you're going to have some architecturally
 * explicit flushing for that, likely *separate* from a regular TLB entry
 * flush, and thus you'd need more than just some range expansion..
 *
 * So if we ever find an architecture
 * that would want something that odd, I think it is up to that
 * architecture to do its own odd thing, not cause pain for others
 * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
 *
 * For now w.r.t page table cache, mark the range_size as PAGE_SIZE
 */

#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		tlb->cleared_pmds = 1;				\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)
#endif

#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		tlb->cleared_puds = 1;				\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)
#endif

#ifndef __ARCH_HAS_4LEVEL_HACK
#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		tlb->cleared_p4ds = 1;				\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif
#endif

#ifndef __ARCH_HAS_5LEVEL_HACK
#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__p4d_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif
#endif

#endif /* CONFIG_MMU */

#ifndef tlb_migrate_finish
#define tlb_migrate_finish(mm) do {} while (0)
#endif

#endif /* _ASM_GENERIC__TLB_H */