#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>

static inline void __invpcid(unsigned long pcid, unsigned long addr,
			     unsigned long type)
{
	struct { u64 d[2]; } desc = { { pcid, addr } };

	/*
	 * The memory clobber is because the whole point is to invalidate
	 * stale TLB entries and, especially if we're flushing global
	 * mappings, we don't want the compiler to reorder any subsequent
	 * memory accesses before the TLB flush.
	 *
	 * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
	 * invpcid (%rcx), %rax in long mode.
	 */
	asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
		      : : "m" (desc), "a" (type), "c" (&desc) : "memory");
}

#define INVPCID_TYPE_INDIV_ADDR		0
#define INVPCID_TYPE_SINGLE_CTXT	1
#define INVPCID_TYPE_ALL_INCL_GLOBAL	2
#define INVPCID_TYPE_ALL_NON_GLOBAL	3

/* Flush a single address for a given PCID, not including globals. */
static inline void invpcid_flush_one(unsigned long pcid,
				     unsigned long addr)
{
	__invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
}

/* Flush all mappings for a given PCID, not including globals. */
static inline void invpcid_flush_single_context(unsigned long pcid)
{
	__invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
}

/* Flush all mappings, including globals, for all PCIDs. */
static inline void invpcid_flush_all(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
}

/* Flush all mappings for all PCIDs except globals. */
static inline void invpcid_flush_all_nonglobals(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
}
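
/*
 * Usage sketch (illustrative, not from the original header).  Each helper
 * maps to one INVPCID type; "pcid" and "addr" are placeholder values:
 *
 *	invpcid_flush_one(pcid, addr);       - one address in one PCID
 *	invpcid_flush_single_context(pcid);  - one PCID, skips globals
 *	invpcid_flush_all();                 - all PCIDs, including globals
 *	invpcid_flush_all_nonglobals();      - all PCIDs, globals survive
 */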

static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
	u64 new_tlb_gen;

	/*
	 * Bump the generation count.  This also serves as a full barrier
	 * that synchronizes with switch_mm(): callers are required to order
	 * their read of mm_cpumask after their writes to the paging
	 * structures.
	 */
	smp_mb__before_atomic();
	new_tlb_gen = atomic64_inc_return(&mm->context.tlb_gen);
	smp_mb__after_atomic();

	return new_tlb_gen;
}
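
/*
 * Ordering sketch (illustrative): a flush initiator updates the paging
 * structures first, bumps the generation, and only then reads mm_cpumask()
 * to pick IPI targets -- the barriers above order that read after the PTE
 * writes:
 *
 *	ptep_get_and_clear(mm, addr, ptep);      - paging-structure update
 *	new_tlb_gen = inc_mm_tlb_gen(mm);        - full barrier
 *	flush_tlb_others(mm_cpumask(mm), &info); - mask read is now safe
 */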

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

static inline bool tlb_defer_switch_to_init_mm(void)
{
	/*
	 * If we have PCID, then switching to init_mm is reasonably
	 * fast.  If we don't have PCID, then switching to init_mm is
	 * quite slow, so we try to defer it in the hopes that we can
	 * avoid it entirely.  The latter approach runs the risk of
	 * receiving otherwise unnecessary IPIs.
	 *
	 * This choice is just a heuristic.  The tlb code can handle this
	 * function returning true or false regardless of whether we have
	 * PCID.
	 */
	return !static_cpu_has(X86_FEATURE_PCID);
}
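
/*
 * Caller sketch (illustrative, based on the heuristic above): the lazy-TLB
 * entry path keys off this helper, roughly:
 *
 *	if (tlb_defer_switch_to_init_mm())
 *		this_cpu_write(cpu_tlbstate.is_lazy, true);  - keep old CR3
 *	else
 *		switch_mm(NULL, &init_mm, NULL);  - pay the CR3 write now
 */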

/*
 * 6 because 6 should be plenty and struct tlb_state will fit in
 * two cache lines.
 */
#define TLB_NR_DYN_ASIDS 6

struct tlb_context {
	u64 ctx_id;
	u64 tlb_gen;
};

struct tlb_state {
	/*
	 * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
	 * are on.  This means that it may not match current->active_mm,
	 * which will contain the previous user mm when we're in lazy TLB
	 * mode even if we've already switched back to swapper_pg_dir.
	 */
	struct mm_struct *loaded_mm;
	u16 loaded_mm_asid;
	u16 next_asid;

	/*
	 * We can be in one of several states:
	 *
	 *  - Actively using an mm.  Our CPU's bit will be set in
	 *    mm_cpumask(loaded_mm) and is_lazy == false;
	 *
	 *  - Not using a real mm.  loaded_mm == &init_mm.  Our CPU's bit
	 *    will not be set in mm_cpumask(&init_mm) and is_lazy == false.
	 *
	 *  - Lazily using a real mm.  loaded_mm != &init_mm, our bit
	 *    is set in mm_cpumask(loaded_mm), but is_lazy == true.
	 *    We're heuristically guessing that the CR3 load we
	 *    skipped more than makes up for the overhead added by
	 *    lazy mode.
	 */
	bool is_lazy;

	/*
	 * Access to this CR4 shadow and to H/W CR4 is protected by
	 * disabling interrupts when modifying either one.
	 */
	unsigned long cr4;

	/*
	 * This is a list of all contexts that might exist in the TLB.
	 * There is one per ASID that we use, and the ASID (what the
	 * CPU calls PCID) is the index into ctxs.
	 *
	 * For each context, ctx_id indicates which mm the TLB's user
	 * entries came from.  As an invariant, the TLB will never
	 * contain entries that are stale relative to the point when
	 * that mm reached the tlb_gen recorded in the list.
	 *
	 * To be clear, this means that it's legal for the TLB code to
	 * flush the TLB without updating tlb_gen.  This can happen
	 * (for now, at least) due to paravirt remote flushes.
	 *
	 * NB: context 0 is a bit special, since it's also used by
	 * various bits of init code.  This is fine -- code that
	 * isn't aware of PCID will end up harmlessly flushing
	 * context 0.
	 */
	struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
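
/*
 * Access sketch (illustrative): cpu_tlbstate is normally touched with
 * this_cpu ops while preemption is off, e.g.:
 *
 *	struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm);
 *	u16 asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
 */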

/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 | mask) != cr4) {
		cr4 |= mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 & ~mask) != cr4) {
		cr4 &= ~mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}
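
/*
 * Example (illustrative): making RDTSC privileged and later re-allowing
 * it, with the shadow kept coherent; callers must prevent concurrent CR4
 * updates on this CPU (e.g. by disabling interrupts):
 *
 *	cr4_set_bits(X86_CR4_TSD);    - set CR4.TSD
 *	...
 *	cr4_clear_bits(X86_CR4_TSD);  - clear CR4.TSD
 */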

static inline void cr4_toggle_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	cr4 ^= mask;
	this_cpu_write(cpu_tlbstate.cr4, cr4);
	__write_cr4(cr4);
}

/* Read the CR4 shadow. */
static inline unsigned long cr4_read_shadow(void)
{
	return this_cpu_read(cpu_tlbstate.cr4);
}

/*
 * Save some of cr4 feature set we're using (e.g.  Pentium 4MB
 * enable and PPro Global page enable), so that any CPU's that boot
 * up after us can get the correct flags.  This should only be used
 * during boot on the boot cpu.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4_set_bits(mask);
}
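
/*
 * Example (illustrative): boot-CPU feature setup can use this so the
 * trampoline hands secondary CPUs the same CR4 flags, e.g.:
 *
 *	if (boot_cpu_has(X86_FEATURE_SMEP))
 *		cr4_set_bits_and_update_boot(X86_CR4_SMEP);
 */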

extern void initialize_tlbstate_and_flush(void);

static inline void __native_flush_tlb(void)
{
	/*
	 * If current->mm == NULL then we borrow a mm which may change during a
	 * task switch and therefore we must not be preempted while we write CR3
	 * back:
	 */
	preempt_disable();
	native_write_cr3(__native_read_cr3());
	preempt_enable();
}

static inline void __native_flush_tlb_global_irq_disabled(void)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	/* clear PGE */
	native_write_cr4(cr4 & ~X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	native_write_cr4(cr4);
}

static inline void __native_flush_tlb_global(void)
{
	unsigned long flags;

	if (static_cpu_has(X86_FEATURE_INVPCID)) {
		/*
		 * Using INVPCID is considerably faster than a pair of writes
		 * to CR4 sandwiched inside an IRQ flag save/restore.
		 */
		invpcid_flush_all();
		return;
	}

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts. (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	__native_flush_tlb_global_irq_disabled();

	raw_local_irq_restore(flags);
}

static inline void __native_flush_tlb_single(unsigned long addr)
{
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}

static inline void __flush_tlb_all(void)
{
	if (boot_cpu_has(X86_FEATURE_PGE))
		__flush_tlb_global();
	else
		__flush_tlb();

	/*
	 * Note: if we somehow had PCID but not PGE, then this wouldn't work --
	 * we'd end up flushing kernel translations for the current ASID but
	 * we might fail to flush kernel translations for other cached ASIDs.
	 *
	 * To avoid this issue, we force PCID off if PGE is off.
	 */
}

static inline void __flush_tlb_one(unsigned long addr)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
	__flush_tlb_single(addr);
}

#define TLB_FLUSH_ALL	-1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, info) flushes TLBs on other cpus
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
struct flush_tlb_info {
	/*
	 * We support several kinds of flushes.
	 *
	 * - Fully flush a single mm.  .mm will be set, .end will be
	 *   TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to
	 *   which the IPI sender is trying to catch us up.
	 *
	 * - Partially flush a single mm.  .mm will be set, .start and
	 *   .end will indicate the range, and .new_tlb_gen will be set
	 *   such that the changes between generation .new_tlb_gen-1 and
	 *   .new_tlb_gen are entirely contained in the indicated range.
	 *
	 * - Fully flush all mms whose tlb_gens have been updated.  .mm
	 *   will be NULL, .end will be TLB_FLUSH_ALL, and .new_tlb_gen
	 *   will be zero.
	 */
	struct mm_struct	*mm;
	unsigned long		start;
	unsigned long		end;
	u64			new_tlb_gen;
};
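
/*
 * Construction sketch (illustrative, following the first case documented
 * above): a full flush of one mm would be described as:
 *
 *	struct flush_tlb_info info = {
 *		.mm          = mm,
 *		.start       = 0UL,
 *		.end         = TLB_FLUSH_ALL,
 *		.new_tlb_gen = inc_mm_tlb_gen(mm),
 *	};
 *	flush_tlb_others(mm_cpumask(mm), &info);
 */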

#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end)	\
		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)

extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
			     const struct flush_tlb_info *info);

static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
					struct mm_struct *mm)
{
	inc_mm_tlb_gen(mm);
	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
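
/*
 * Batching sketch (illustrative): unmap-heavy paths (e.g. reclaim) can
 * accumulate target CPUs from several mms and pay for one combined flush:
 *
 *	arch_tlbbatch_add_mm(&batch, mm1);  - bumps each mm's tlb_gen
 *	arch_tlbbatch_add_mm(&batch, mm2);
 *	arch_tlbbatch_flush(&batch);        - one round of IPIs for both
 */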

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, info)	\
	native_flush_tlb_others(mask, info)
#endif

#endif /* _ASM_X86_TLBFLUSH_H */