#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/special_insns.h>

static inline void __invpcid(unsigned long pcid, unsigned long addr,
			     unsigned long type)
{
	u64 desc[2] = { pcid, addr };

	/*
	 * The memory clobber is because the whole point is to invalidate
	 * stale TLB entries and, especially if we're flushing global
	 * mappings, we don't want the compiler to reorder any subsequent
	 * memory accesses before the TLB flush.
	 *
	 * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
	 * invpcid (%rcx), %rax in long mode.
	 */
	asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
		      : : "m" (desc), "a" (type), "c" (desc) : "memory");
}

#define INVPCID_TYPE_INDIV_ADDR		0
#define INVPCID_TYPE_SINGLE_CTXT	1
#define INVPCID_TYPE_ALL_INCL_GLOBAL	2
#define INVPCID_TYPE_ALL_NON_GLOBAL	3

/* Flush mappings for the given PCID and address, not including globals. */
static inline void invpcid_flush_one(unsigned long pcid,
				     unsigned long addr)
{
	__invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
}

/* Flush all mappings for a given PCID, not including globals. */
static inline void invpcid_flush_single_context(unsigned long pcid)
{
	__invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
}

/* Flush all mappings, including globals, for all PCIDs. */
static inline void invpcid_flush_all(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
}

/* Flush all mappings for all PCIDs except globals. */
static inline void invpcid_flush_all_nonglobals(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
}
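
/*
 * Illustrative usage sketch (not part of this header): INVPCID raises
 * #UD on CPUs that lack it, so callers are expected to check the feature
 * bit before using the helpers above, e.g.:
 *
 *	if (static_cpu_has(X86_FEATURE_INVPCID))
 *		invpcid_flush_all();
 */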

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

struct tlb_state {
#ifdef CONFIG_SMP
	struct mm_struct *active_mm;
	int state;
#endif

	/*
	 * Access to this CR4 shadow and to H/W CR4 is protected by
	 * disabling interrupts when modifying either one.
	 */
	unsigned long cr4;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 | mask) != cr4) {
		cr4 |= mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 & ~mask) != cr4) {
		cr4 &= ~mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}
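
/*
 * Illustrative usage sketch (X86_CR4_TSD is just an example bit): these
 * helpers keep the cpu_tlbstate.cr4 shadow and the hardware register in
 * sync, so toggling a bit on this CPU is simply:
 *
 *	cr4_set_bits(X86_CR4_TSD);
 *	...
 *	cr4_clear_bits(X86_CR4_TSD);
 */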

/* Read the CR4 shadow. */
static inline unsigned long cr4_read_shadow(void)
{
	return this_cpu_read(cpu_tlbstate.cr4);
}

/*
 * Save some of the cr4 feature set we're using (e.g. the Pentium 4MB
 * enable and the PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags.  This should only be used
 * during boot on the boot CPU.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4_set_bits(mask);
}
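
/*
 * Illustrative usage sketch (the PSE check is the assumed caller context):
 * boot code sets a CR4 bit once and records it so that secondary CPUs and
 * the realmode trampoline come up with the same value, e.g.:
 *
 *	if (cpu_has_pse)
 *		cr4_set_bits_and_update_boot(X86_CR4_PSE);
 */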

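/* Flush all non-global TLB entries by reloading CR3 with its current value. */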
static inline void __native_flush_tlb(void)
{
	native_write_cr3(native_read_cr3());
}

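/* Toggling CR4.PGE flushes all TLB entries, including global ones. */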
static inline void __native_flush_tlb_global_irq_disabled(void)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	/* clear PGE */
	native_write_cr4(cr4 & ~X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	native_write_cr4(cr4);
}

static inline void __native_flush_tlb_global(void)
{
	unsigned long flags;

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts. (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	__native_flush_tlb_global_irq_disabled();

	raw_local_irq_restore(flags);
}

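/* Flush one address on this CPU with INVLPG. */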
static inline void __native_flush_tlb_single(unsigned long addr)
{
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}

static inline void __flush_tlb_all(void)
{
	if (cpu_has_pge)
		__flush_tlb_global();
	else
		__flush_tlb();
}

static inline void __flush_tlb_one(unsigned long addr)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
	__flush_tlb_single(addr);
}

#define TLB_FLUSH_ALL	-1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, mm, start, end) flushes TLBs on other cpus
 *
 * ...but the i386 has somewhat limited TLB flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
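
/*
 * Illustrative usage sketch (the page-table update is assumed caller
 * context, not defined here): update the page tables first, then flush
 * the stale entries, e.g.:
 *
 *	pte_clear(vma->vm_mm, addr, ptep);
 *	flush_tlb_page(vma, addr);
 *
 * or, after modifying a range of PTEs:
 *
 *	flush_tlb_range(vma, start, end);
 */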

#ifndef CONFIG_SMP

/* "_up" is for UniProcessor.
 *
 * This is a helper for other header functions.  *Not* intended to be called
 * directly.  All global TLB flushes need to either call this, or to bump the
 * vm statistics themselves.
 */
static inline void __flush_tlb_up(void)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb();
}

static inline void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb_all();
}

static inline void flush_tlb(void)
{
	__flush_tlb_up();
}

static inline void local_flush_tlb(void)
{
	__flush_tlb_up();
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		__flush_tlb_up();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_one(addr);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_up();
}

static inline void flush_tlb_mm_range(struct mm_struct *mm,
	   unsigned long start, unsigned long end, unsigned long vmflag)
{
	if (mm == current->active_mm)
		__flush_tlb_up();
}

static inline void native_flush_tlb_others(const struct cpumask *cpumask,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
}

static inline void reset_lazy_tlbstate(void)
{
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_tlb_all();
}

#else  /* SMP */

#include <asm/smp.h>

#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end)	\
		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)

extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

#define flush_tlb()	flush_tlb_current_task()

void native_flush_tlb_others(const struct cpumask *cpumask,
				struct mm_struct *mm,
				unsigned long start, unsigned long end);

#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2

static inline void reset_lazy_tlbstate(void)
{
	this_cpu_write(cpu_tlbstate.state, 0);
	this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
}

#endif	/* SMP */

/* Not inlined due to inc_irq_stat not being defined yet */
#define flush_tlb_local() {		\
	inc_irq_stat(irq_tlb_count);	\
	local_flush_tlb();		\
}

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, mm, start, end)	\
	native_flush_tlb_others(mask, mm, start, end)
#endif

#endif /* _ASM_X86_TLBFLUSH_H */