/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>

static inline void __invpcid(unsigned long pcid, unsigned long addr,
			     unsigned long type)
{
	struct { u64 d[2]; } desc = { { pcid, addr } };

	/*
	 * The memory clobber is because the whole point is to invalidate
	 * stale TLB entries and, especially if we're flushing global
	 * mappings, we don't want the compiler to reorder any subsequent
	 * memory accesses before the TLB flush.
	 *
	 * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
	 * invpcid (%rcx), %rax in long mode.
	 */
	asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
		      : : "m" (desc), "a" (type), "c" (&desc) : "memory");
}

#define INVPCID_TYPE_INDIV_ADDR		0
#define INVPCID_TYPE_SINGLE_CTXT	1
#define INVPCID_TYPE_ALL_INCL_GLOBAL	2
#define INVPCID_TYPE_ALL_NON_GLOBAL	3

/* Flush the given address for the given PCID, not including globals. */
static inline void invpcid_flush_one(unsigned long pcid,
				     unsigned long addr)
{
	__invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
}

/* Flush all mappings for a given PCID, not including globals. */
static inline void invpcid_flush_single_context(unsigned long pcid)
{
	__invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
}

/* Flush all mappings, including globals, for all PCIDs. */
static inline void invpcid_flush_all(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
}

/* Flush all mappings for all PCIDs except globals. */
static inline void invpcid_flush_all_nonglobals(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
}
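
/*
 * Illustrative sketch, not part of the original header: the helper name and
 * argument values are hypothetical.  It shows how the wrappers above would
 * typically be used to drop a single user-space translation for a known
 * PCID, guarded by the INVPCID feature bit since the instruction is only
 * usable when the CPU advertises it.
 */
static inline void example_invpcid_flush_user_page(unsigned long pcid,
						   unsigned long addr)
{
	if (static_cpu_has(X86_FEATURE_INVPCID))
		invpcid_flush_one(pcid, addr);
}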

static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
	/*
	 * Bump the generation count.  This also serves as a full barrier
	 * that synchronizes with switch_mm(): callers are required to order
	 * their read of mm_cpumask after their writes to the paging
	 * structures.
	 */
	return atomic64_inc_return(&mm->context.tlb_gen);
}
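
/*
 * Sketch for illustration only (the helper and the local_tlb_gen argument
 * are hypothetical, not part of this header): flush code can compare a
 * CPU-local cached generation against the mm's current tlb_gen to decide
 * whether any further flushing is actually required.
 */
static inline bool example_tlb_gen_is_stale(struct mm_struct *mm,
					    u64 local_tlb_gen)
{
	return local_tlb_gen < atomic64_read(&mm->context.tlb_gen);
}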

/* There are 12 bits of space for ASIDs in CR3 */
#define CR3_HW_ASID_BITS		12
/*
 * When enabled, PAGE_TABLE_ISOLATION consumes a single bit for
 * user/kernel switches
 */
#define PTI_CONSUMED_ASID_BITS		0

#define CR3_AVAIL_ASID_BITS (CR3_HW_ASID_BITS - PTI_CONSUMED_ASID_BITS)
/*
 * ASIDs are zero-based: 0->MAX_ASID_AVAILABLE are valid.  -1 below to account
 * for them being zero-based.  Another -1 is because ASID 0 is reserved for
 * use by non-PCID-aware users.
 */
#define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_ASID_BITS) - 2)

static inline u16 kern_pcid(u16 asid)
{
	VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
	/*
	 * If PCID is on, ASID-aware code paths put the ASID+1 into the
	 * PCID bits.  This serves two purposes.  It prevents a nasty
	 * situation in which PCID-unaware code saves CR3, loads some other
	 * value (with PCID == 0), and then restores CR3, thus corrupting
	 * the TLB for ASID 0 if the saved ASID was nonzero.  It also means
	 * that any bugs involving loading a PCID-enabled CR3 with
	 * CR4.PCIDE off will trigger deterministically.
	 */
	return asid + 1;
}

struct pgd_t;
static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
{
	if (static_cpu_has(X86_FEATURE_PCID)) {
		return __sme_pa(pgd) | kern_pcid(asid);
	} else {
		VM_WARN_ON_ONCE(asid != 0);
		return __sme_pa(pgd);
	}
}

static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
{
	VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
	VM_WARN_ON_ONCE(!this_cpu_has(X86_FEATURE_PCID));
	return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH;
}
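
/*
 * Illustrative sketch (hypothetical helper; the example numbers are made
 * up): with CR4.PCIDE=1 the low 12 bits of CR3 hold the PCID, so a pgd at
 * physical address 0x1234000 with ASID 2 yields a build_cr3() value of
 * 0x1234003 (ignoring the SME encryption bit that __sme_pa() may add), and
 * this helper recovers the PCID, i.e. kern_pcid(2) == 3.
 */
static inline u16 example_cr3_to_pcid(unsigned long cr3)
{
	return (u16)(cr3 & ((1UL << CR3_HW_ASID_BITS) - 1));
}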

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

static inline bool tlb_defer_switch_to_init_mm(void)
{
	/*
	 * If we have PCID, then switching to init_mm is reasonably
	 * fast.  If we don't have PCID, then switching to init_mm is
	 * quite slow, so we try to defer it in the hopes that we can
	 * avoid it entirely.  The latter approach runs the risk of
	 * receiving otherwise unnecessary IPIs.
	 *
	 * This choice is just a heuristic.  The tlb code can handle this
	 * function returning true or false regardless of whether we have
	 * PCID.
	 */
	return !static_cpu_has(X86_FEATURE_PCID);
}

/*
 * 6 because 6 should be plenty and struct tlb_state will fit in
 * two cache lines.
 */
#define TLB_NR_DYN_ASIDS 6

struct tlb_context {
	u64 ctx_id;
	u64 tlb_gen;
};

struct tlb_state {
	/*
	 * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
	 * are on.  This means that it may not match current->active_mm,
	 * which will contain the previous user mm when we're in lazy TLB
	 * mode even if we've already switched back to swapper_pg_dir.
	 */
	struct mm_struct *loaded_mm;
	u16 loaded_mm_asid;
	u16 next_asid;

	/*
	 * We can be in one of several states:
	 *
	 *  - Actively using an mm.  Our CPU's bit will be set in
	 *    mm_cpumask(loaded_mm) and is_lazy == false.
	 *
	 *  - Not using a real mm.  loaded_mm == &init_mm.  Our CPU's bit
	 *    will not be set in mm_cpumask(&init_mm) and is_lazy == false.
	 *
	 *  - Lazily using a real mm.  loaded_mm != &init_mm, our bit
	 *    is set in mm_cpumask(loaded_mm), but is_lazy == true.
	 *    We're heuristically guessing that the CR3 load we
	 *    skipped more than makes up for the overhead added by
	 *    lazy mode.
	 */
	bool is_lazy;

	/*
	 * Access to this CR4 shadow and to H/W CR4 is protected by
	 * disabling interrupts when modifying either one.
	 */
	unsigned long cr4;

	/*
	 * This is a list of all contexts that might exist in the TLB.
	 * There is one per ASID that we use, and the ASID (what the
	 * CPU calls PCID) is the index into ctxs[].
	 *
	 * For each context, ctx_id indicates which mm the TLB's user
	 * entries came from.  As an invariant, the TLB will never
	 * contain entries that are out-of-date with respect to the
	 * tlb_gen recorded in this list for that mm.
	 *
	 * To be clear, this means that it's legal for the TLB code to
	 * flush the TLB without updating tlb_gen.  This can happen
	 * (for now, at least) due to paravirt remote flushes.
	 *
	 * NB: context 0 is a bit special, since it's also used by
	 * various bits of init code.  This is fine -- code that
	 * isn't aware of PCID will end up harmlessly flushing
	 * context 0.
	 */
	struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
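
/*
 * Sketch (hypothetical helpers, for illustration): the per-CPU state above
 * is read with the usual this_cpu_*() accessors, e.g. to see which mm and
 * ASID the current CPU believes are loaded into CR3.
 */
static inline struct mm_struct *example_loaded_mm(void)
{
	return this_cpu_read(cpu_tlbstate.loaded_mm);
}

static inline u16 example_loaded_mm_asid(void)
{
	return this_cpu_read(cpu_tlbstate.loaded_mm_asid);
}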

/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 | mask) != cr4) {
		cr4 |= mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 & ~mask) != cr4) {
		cr4 &= ~mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}

static inline void cr4_toggle_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	cr4 ^= mask;
	this_cpu_write(cpu_tlbstate.cr4, cr4);
	__write_cr4(cr4);
}

/* Read the CR4 shadow. */
static inline unsigned long cr4_read_shadow(void)
{
	return this_cpu_read(cpu_tlbstate.cr4);
}
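
/*
 * Usage sketch (the helper is hypothetical): feature setup code is expected
 * to go through the shadow-aware helpers above so that the cached CR4 value
 * and the hardware register never diverge; callers remain responsible for
 * the interrupt discipline documented with the cr4 field.
 */
static inline void example_configure_smep(bool enable)
{
	if (enable)
		cr4_set_bits(X86_CR4_SMEP);
	else
		cr4_clear_bits(X86_CR4_SMEP);
}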

/*
 * Save some of the cr4 feature set we're using (e.g. Pentium 4MB
 * enable and PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags.  This should only be used
 * during boot on the boot cpu.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4_set_bits(mask);
}

extern void initialize_tlbstate_and_flush(void);

/*
 * flush the entire current user mapping
 */
static inline void __native_flush_tlb(void)
{
	/*
	 * If current->mm == NULL then we borrow a mm which may change during a
	 * task switch and therefore we must not be preempted while we write CR3
	 * back:
	 */
	preempt_disable();
	native_write_cr3(__native_read_cr3());
	preempt_enable();
}

/*
 * flush everything
 */
static inline void __native_flush_tlb_global(void)
{
	unsigned long cr4, flags;

	if (static_cpu_has(X86_FEATURE_INVPCID)) {
		/*
		 * Using INVPCID is considerably faster than a pair of writes
		 * to CR4 sandwiched inside an IRQ flag save/restore.
		 */
		invpcid_flush_all();
		return;
	}

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts. (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	/* toggle PGE */
	native_write_cr4(cr4 ^ X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	native_write_cr4(cr4);

	raw_local_irq_restore(flags);
}

/*
 * flush one page in the user mapping
 */
static inline void __native_flush_tlb_single(unsigned long addr)
{
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}

/*
 * flush everything
 */
static inline void __flush_tlb_all(void)
{
	if (boot_cpu_has(X86_FEATURE_PGE)) {
		__flush_tlb_global();
	} else {
		/*
		 * !PGE -> !PCID (setup_pcid()), thus every flush is total.
		 */
		__flush_tlb();
	}

	/*
	 * Note: if we somehow had PCID but not PGE, then this wouldn't work --
	 * we'd end up flushing kernel translations for the current ASID but
	 * we might fail to flush kernel translations for other cached ASIDs.
	 *
	 * To avoid this issue, we force PCID off if PGE is off.
	 */
}

/*
 * flush one page in the kernel mapping
 */
static inline void __flush_tlb_one(unsigned long addr)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
	__flush_tlb_single(addr);
}

#define TLB_FLUSH_ALL	-1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, info) flushes TLBs on other cpus
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
struct flush_tlb_info {
	/*
	 * We support several kinds of flushes.
	 *
	 * - Fully flush a single mm.  .mm will be set, .end will be
	 *   TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to
	 *   which the IPI sender is trying to catch us up.
	 *
	 * - Partially flush a single mm.  .mm will be set, .start and
	 *   .end will indicate the range, and .new_tlb_gen will be set
	 *   such that the changes between generation .new_tlb_gen-1 and
	 *   .new_tlb_gen are entirely contained in the indicated range.
	 *
	 * - Fully flush all mms whose tlb_gens have been updated.  .mm
	 *   will be NULL, .end will be TLB_FLUSH_ALL, and .new_tlb_gen
	 *   will be zero.
	 */
	struct mm_struct	*mm;
	unsigned long		start;
	unsigned long		end;
	u64			new_tlb_gen;
};
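
/*
 * Illustrative sketch (hypothetical helper): filling in a partial flush of a
 * single mm as described above, with .new_tlb_gen taken from the generation
 * bump performed for the page-table change that made the flush necessary.
 */
static inline struct flush_tlb_info example_partial_flush_info(struct mm_struct *mm,
							       unsigned long start,
							       unsigned long end)
{
	struct flush_tlb_info info = {
		.mm		= mm,
		.start		= start,
		.end		= end,
		.new_tlb_gen	= inc_mm_tlb_gen(mm),
	};

	return info;
}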

#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end)	\
		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)

extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
}
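
/*
 * Usage sketch (hypothetical helper): a ranged flush covering part of a VMA.
 * Whether this ends up as per-page INVLPGs or a full flush is decided inside
 * the flush_tlb_mm_range() implementation, not in this header.
 */
static inline void example_flush_vma_range(struct vm_area_struct *vma,
					   unsigned long start,
					   unsigned long end)
{
	flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags);
}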

void native_flush_tlb_others(const struct cpumask *cpumask,
			     const struct flush_tlb_info *info);

static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
					struct mm_struct *mm)
{
	inc_mm_tlb_gen(mm);
	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
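
/*
 * Sketch (hypothetical helper): the batched-unmap pattern supported by the
 * two routines above: accumulate the CPUs of each mm of interest, then issue
 * one combined flush at the end instead of flushing per mm.
 */
static inline void example_batched_unmap_flush(struct arch_tlbflush_unmap_batch *batch,
					       struct mm_struct *mm)
{
	/* Record this mm's CPUs in the batch and bump its tlb_gen. */
	arch_tlbbatch_add_mm(batch, mm);
	/* A single flush covering everything batched so far. */
	arch_tlbbatch_flush(batch);
}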

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, info)	\
	native_flush_tlb_others(mask, info)
#endif

#endif /* _ASM_X86_TLBFLUSH_H */