#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/system.h>

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

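/* Reloading CR3 with its current value flushes all non-global TLB entries on this CPU. */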
static inline void __native_flush_tlb(void)
{
	native_write_cr3(native_read_cr3());
}

static inline void __native_flush_tlb_global(void)
{
	unsigned long flags;
	unsigned long cr4;

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts. (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	cr4 = native_read_cr4();
	/* clear PGE */
	native_write_cr4(cr4 & ~X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	native_write_cr4(cr4);

	raw_local_irq_restore(flags);
}

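/* INVLPG invalidates the TLB entries that map a single linear address. */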
static inline void __native_flush_tlb_single(unsigned long addr)
{
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}

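/*
 * Global pages (PGE) survive a CR3 reload, so use the CR4.PGE toggle when
 * the CPU supports them; otherwise a plain CR3 reload flushes everything.
 */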
static inline void __flush_tlb_all(void)
{
	if (cpu_has_pge)
		__flush_tlb_global();
	else
		__flush_tlb();
}

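/* Use page-granular INVLPG when available; older CPUs flush the whole TLB. */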
static inline void __flush_tlb_one(unsigned long addr)
{
	if (cpu_has_invlpg)
		__flush_tlb_single(addr);
	else
		__flush_tlb();
}

#ifdef CONFIG_X86_32
# define TLB_FLUSH_ALL	0xffffffff
#else
# define TLB_FLUSH_ALL	-1ULL
#endif

/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct's TLB entries
 *  - flush_tlb_all() flushes all processes' TLB entries
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, mm, va) flushes TLBs on other cpus
 *
 * ...but the i386 has somewhat limited TLB flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 *
 * x86-64 can only flush individual pages or the full virtual address
 * space; for a range flush we always do the full flush.  It might be
 * worth checking whether a few INVLPGs in a row are a win for small
 * ranges.
 *
 * (An illustrative usage sketch appears at the end of this header.)
 */

#ifndef CONFIG_SMP

#define flush_tlb() __flush_tlb()
#define flush_tlb_all() __flush_tlb_all()
#define local_flush_tlb() __flush_tlb()

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		__flush_tlb();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_one(addr);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb();
}

static inline void native_flush_tlb_others(const struct cpumask *cpumask,
					   struct mm_struct *mm,
					   unsigned long va)
{
}

static inline void reset_lazy_tlbstate(void)
{
}

#else  /* SMP */

#include <asm/smp.h>

#define local_flush_tlb() __flush_tlb()

extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);

#define flush_tlb()	flush_tlb_current_task()

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
			     struct mm_struct *mm, unsigned long va);

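/*
 * Per-CPU TLB state: TLBSTATE_OK means this CPU is actively using
 * cpu_tlbstate.active_mm; TLBSTATE_LAZY means it is running a kernel
 * thread with a borrowed mm (lazy TLB mode) and may opt out of further
 * flush IPIs for that mm.
 */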
#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2

struct tlb_state {
	struct mm_struct *active_mm;
	int state;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

static inline void reset_lazy_tlbstate(void)
{
	percpu_write(cpu_tlbstate.state, 0);
	percpu_write(cpu_tlbstate.active_mm, &init_mm);
}

#endif	/* SMP */

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, mm, va)	native_flush_tlb_others(mask, mm, va)
#endif

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_tlb_all();
}

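/*
 * Illustrative usage sketch, not part of the original header: one way a
 * caller that has just updated user page-table entries might choose
 * between the interfaces above.  The helper name and its policy are
 * hypothetical.
 */
static inline void example_flush_user_range(struct vm_area_struct *vma,
					    unsigned long start,
					    unsigned long end)
{
	if (end - start <= PAGE_SIZE)
		flush_tlb_page(vma, start);	/* one page: page-granular flush */
	else
		flush_tlb_range(vma, start, end); /* larger span: range flush */
}
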
#endif /* _ASM_X86_TLBFLUSH_H */