#ifndef _X8664_TLBFLUSH_H
#define _X8664_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/system.h>

/* Reload CR3 to flush all non-global TLB entries of the current address space. */
static inline void __flush_tlb(void)
{
	write_cr3(read_cr3());
}

/* Toggle CR4.PGE to flush global TLB entries as well. */
static inline void __flush_tlb_all(void)
{
	unsigned long cr4 = read_cr4();
	write_cr4(cr4 & ~X86_CR4_PGE);	/* clear PGE */
	write_cr4(cr4);			/* write old PGE again and flush TLBs */
}

/* Invalidate the TLB entry for a single linear address. */
#define __flush_tlb_one(addr) \
	__asm__ __volatile__("invlpg (%0)" :: "r" (addr) : "memory")


/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 *
 * x86-64 can only flush individual pages or the full address space.
 * For a range flush we always flush the full address space; it might
 * be worth checking whether a few INVLPGs in a row are a win for
 * small ranges.
 */
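
/*
 * Illustrative sketch only: the helper below is made up and is not part
 * of this header.  It shows the typical pattern a caller would follow --
 * after a single user PTE has been rewritten, the stale translation for
 * that one address is dropped with flush_tlb_page(); a multi-page update
 * would instead use flush_tlb_range(), which on x86-64 flushes the whole
 * address space as noted above.
 *
 *	static void example_remap_one_page(struct vm_area_struct *vma,
 *					   unsigned long addr, pte_t *ptep,
 *					   pte_t newpte)
 *	{
 *		set_pte(ptep, newpte);
 *		flush_tlb_page(vma, addr);
 *	}
 */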

#ifndef CONFIG_SMP

#define flush_tlb() __flush_tlb()
#define flush_tlb_all() __flush_tlb_all()
#define local_flush_tlb() __flush_tlb()

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		__flush_tlb();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
	unsigned long addr)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_one(addr);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb();
}

#else

#include <asm/smp.h>

#define local_flush_tlb() \
	__flush_tlb()

extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);

#define flush_tlb()	flush_tlb_current_task()

static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}

#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2

/* Roughly an IPI every 20MB with 4k pages for freeing page table
   ranges. Cost is about 42k of memory for each CPU. */
#define ARCH_FREE_PTE_NR 5350	
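/*
 * Back-of-the-envelope check (a sketch, assuming the batch stores one
 * 8-byte struct page pointer per 4 KB page):
 *	5350 * 4 KB ~= 21 MB of address space freed between IPIs
 *	5350 * 8 B  ~= 42 KB of batch memory per CPU
 */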

#endif

static inline void flush_tlb_kernel_range(unsigned long start,
					unsigned long end)
{
	flush_tlb_all();
}

static inline void flush_tlb_pgtables(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
	/* x86_64 does not keep any page table caches in a software TLB.
	   The CPUs do in their hardware TLBs, but they are handled
	   by the normal TLB flushing algorithms. */
}

#endif /* _X8664_TLBFLUSH_H */