/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>
#include <asm/invpcid.h>
#include <asm/pti.h>
#include <asm/processor-flags.h>

/*
 * The x86 feature is called PCID (Process Context IDentifier). It is similar
 * to what is traditionally called ASID on the RISC processors.
 *
 * We don't use the traditional ASID implementation, where each process/mm gets
 * its own ASID and flush/restart when we run out of ASID space.
 *
 * Instead we have a small per-cpu array of ASIDs and cache the last few mm's
 * that came by on this CPU, allowing cheaper switch_mm between processes on
 * this CPU.
 *
 * We end up with different spaces for different things. To avoid confusion we
 * use different names for each of them:
 *
 * ASID  - [0, TLB_NR_DYN_ASIDS-1]
 *         the canonical identifier for an mm
 *
 * kPCID - [1, TLB_NR_DYN_ASIDS]
 *         the value we write into the PCID part of CR3; corresponds to the
 *         ASID+1, because PCID 0 is special.
 *
 * uPCID - [2048 + 1, 2048 + TLB_NR_DYN_ASIDS]
 *         for KPTI each mm has two address spaces and thus needs two
 *         PCID values, but we can still do with a single ASID denomination
 *         for each mm. Corresponds to kPCID + 2048.
 *
 */
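/*
 * A worked example (illustrative only, assuming PTI is enabled and
 * TLB_NR_DYN_ASIDS == 6): ASID 0 is written to CR3 as kPCID 1 for the
 * kernel page tables and as uPCID 1 + 2048 == 2049 for the user page
 * tables; ASID 5, the largest dynamic ASID, becomes kPCID 6 and
 * uPCID 2054.  kern_pcid() and user_pcid() below implement this mapping.
 */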

/* There are 12 bits of space for ASIDs in CR3 */
#define CR3_HW_ASID_BITS		12

/*
 * When enabled, PAGE_TABLE_ISOLATION consumes a single bit for
 * user/kernel switches
 */
#ifdef CONFIG_PAGE_TABLE_ISOLATION
# define PTI_CONSUMED_PCID_BITS	1
#else
# define PTI_CONSUMED_PCID_BITS	0
#endif

#define CR3_AVAIL_PCID_BITS (X86_CR3_PCID_BITS - PTI_CONSUMED_PCID_BITS)

/*
 * ASIDs are zero-based: 0->MAX_ASID_AVAILABLE are valid.  -1 below to account
 * for them being zero-based.  Another -1 is because PCID 0 is reserved for
 * use by non-PCID-aware users.
 */
#define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_PCID_BITS) - 2)
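/*
 * A sketch of the arithmetic (illustrative, not new configuration): with
 * CONFIG_PAGE_TABLE_ISOLATION=y, CR3_AVAIL_PCID_BITS == 12 - 1 == 11, so
 * MAX_ASID_AVAILABLE == (1 << 11) - 2 == 2046; without PTI it is
 * (1 << 12) - 2 == 4094.
 */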

/*
 * 6 because 6 should be plenty and struct tlb_state will fit in two cache
 * lines.
 */
#define TLB_NR_DYN_ASIDS	6

/*
 * Given @asid, compute kPCID
 */
static inline u16 kern_pcid(u16 asid)
{
	VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);

#ifdef CONFIG_PAGE_TABLE_ISOLATION
	/*
	 * Make sure that the dynamic ASID space does not conflict with the
	 * bit we are using to switch between user and kernel ASIDs.
	 */
	BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_SWITCH_BIT));

	/*
	 * The ASID being passed in here should have respected the
	 * MAX_ASID_AVAILABLE and thus never have the switch bit set.
	 */
	VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_SWITCH_BIT));
#endif
	/*
	 * The dynamically-assigned ASIDs that get passed in are small
	 * (<TLB_NR_DYN_ASIDS).  They never have the high switch bit set,
	 * so do not bother to clear it.
	 *
	 * If PCID is on, ASID-aware code paths put the ASID+1 into the
	 * PCID bits.  This serves two purposes.  It prevents a nasty
	 * situation in which PCID-unaware code saves CR3, loads some other
	 * value (with PCID == 0), and then restores CR3, thus corrupting
	 * the TLB for ASID 0 if the saved ASID was nonzero.  It also means
	 * that any bugs involving loading a PCID-enabled CR3 with
	 * CR4.PCIDE off will trigger deterministically.
	 */
	return asid + 1;
}

/*
 * Given @asid, compute uPCID
 */
static inline u16 user_pcid(u16 asid)
{
	u16 ret = kern_pcid(asid);
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	ret |= 1 << X86_CR3_PTI_SWITCH_BIT;
#endif
	return ret;
}

struct pgd_t;
static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
{
	if (static_cpu_has(X86_FEATURE_PCID)) {
		return __sme_pa(pgd) | kern_pcid(asid);
	} else {
		VM_WARN_ON_ONCE(asid != 0);
		return __sme_pa(pgd);
	}
}

static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
{
	VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
	VM_WARN_ON_ONCE(!this_cpu_has(X86_FEATURE_PCID));
	return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH;
}
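/*
 * Illustrative example (not a new definition): for a pgd at physical
 * address 0x1234000 and ASID 1, build_cr3() yields
 * 0x1234000 | kern_pcid(1), i.e. PCID 2 in the low 12 bits (plus any SME
 * encryption mask __sme_pa() applies), while build_cr3_noflush()
 * additionally sets CR3_NOFLUSH (bit 63) so that loading the value does
 * not flush the TLB entries tagged with that PCID.
 */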

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

static inline bool tlb_defer_switch_to_init_mm(void)
{
	/*
	 * If we have PCID, then switching to init_mm is reasonably
	 * fast.  If we don't have PCID, then switching to init_mm is
	 * quite slow, so we try to defer it in the hopes that we can
	 * avoid it entirely.  The latter approach runs the risk of
	 * receiving otherwise unnecessary IPIs.
	 *
	 * This choice is just a heuristic.  The tlb code can handle this
	 * function returning true or false regardless of whether we have
	 * PCID.
	 */
	return !static_cpu_has(X86_FEATURE_PCID);
}

struct tlb_context {
	u64 ctx_id;
	u64 tlb_gen;
};

struct tlb_state {
	/*
	 * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
	 * are on.  This means that it may not match current->active_mm,
	 * which will contain the previous user mm when we're in lazy TLB
	 * mode even if we've already switched back to swapper_pg_dir.
	 */
	struct mm_struct *loaded_mm;
	u16 loaded_mm_asid;
	u16 next_asid;

	/*
	 * We can be in one of several states:
	 *
	 *  - Actively using an mm.  Our CPU's bit will be set in
	 *    mm_cpumask(loaded_mm) and is_lazy == false;
	 *
	 *  - Not using a real mm.  loaded_mm == &init_mm.  Our CPU's bit
	 *    will not be set in mm_cpumask(&init_mm) and is_lazy == false.
	 *
	 *  - Lazily using a real mm.  loaded_mm != &init_mm, our bit
	 *    is set in mm_cpumask(loaded_mm), but is_lazy == true.
	 *    We're heuristically guessing that the CR3 load we
	 *    skipped more than makes up for the overhead added by
	 *    lazy mode.
	 */
	bool is_lazy;
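	/*
	 * For example (informal sketch of behavior that lives in
	 * arch/x86/mm/tlb.c): a CPU entering the idle thread typically keeps
	 * the old loaded_mm and sets is_lazy instead of switching to
	 * init_mm; if that mm later needs a TLB flush, the flush IPI notices
	 * is_lazy and switches this CPU to init_mm instead of flushing.
	 */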

	/*
	 * If set we changed the page tables in such a way that we
	 * needed an invalidation of all contexts (aka. PCIDs / ASIDs).
	 * This tells us to go invalidate all the non-loaded ctxs[]
	 * on the next context switch.
	 *
	 * The current ctx was kept up-to-date as it ran and does not
	 * need to be invalidated.
	 */
	bool invalidate_other;

	/*
	 * Mask that contains TLB_NR_DYN_ASIDS+1 bits to indicate
	 * the corresponding user PCID needs a flush next time we
	 * switch to it; see SWITCH_TO_USER_CR3.
	 */
	unsigned short user_pcid_flush_mask;

	/*
	 * Access to this CR4 shadow and to H/W CR4 is protected by
	 * disabling interrupts when modifying either one.
	 */
	unsigned long cr4;

	/*
	 * This is a list of all contexts that might exist in the TLB.
	 * There is one per ASID that we use, and the ASID (what the
	 * CPU calls PCID) is the index into ctxs[].
	 *
	 * For each context, ctx_id indicates which mm the TLB's user
	 * entries came from.  As an invariant, the TLB will never
	 * contain entries that are out-of-date relative to when that mm reached
	 * the tlb_gen in the list.
	 *
	 * To be clear, this means that it's legal for the TLB code to
	 * flush the TLB without updating tlb_gen.  This can happen
	 * (for now, at least) due to paravirt remote flushes.
	 *
	 * NB: context 0 is a bit special, since it's also used by
	 * various bits of init code.  This is fine -- code that
	 * isn't aware of PCID will end up harmlessly flushing
	 * context 0.
	 */
	struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 | mask) != cr4) {
		cr4 |= mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 & ~mask) != cr4) {
		cr4 &= ~mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}
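/*
 * Usage sketch (illustrative only; real callers live in the CPU setup
 * code):
 *
 *	cr4_set_bits(X86_CR4_SMEP);
 *	cr4_clear_bits(X86_CR4_TSD);
 *
 * Both update the per-CPU shadow and the hardware register together.
 */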

static inline void cr4_toggle_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	cr4 ^= mask;
	this_cpu_write(cpu_tlbstate.cr4, cr4);
	__write_cr4(cr4);
}

/* Read the CR4 shadow. */
static inline unsigned long cr4_read_shadow(void)
{
	return this_cpu_read(cpu_tlbstate.cr4);
}

/*
 * Mark all other ASIDs as invalid; the current one is preserved.
 */
static inline void invalidate_other_asid(void)
{
	this_cpu_write(cpu_tlbstate.invalidate_other, true);
}

/*
 * Save some of the cr4 feature set we're using (e.g. Pentium 4MB
 * enable and PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags.  This should only be used
 * during boot on the boot CPU.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4_set_bits(mask);
}

extern void initialize_tlbstate_and_flush(void);

/*
 * Given an ASID, flush the corresponding user ASID.  We can delay this
 * until the next time we switch to it.
 *
 * See SWITCH_TO_USER_CR3.
 */
static inline void invalidate_user_asid(u16 asid)
{
	/* There is no user ASID if address space separation is off */
	if (!IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
		return;

	/*
	 * We only have a single ASID if PCID is off and the CR3
	 * write will have flushed it.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_PCID))
		return;

	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	__set_bit(kern_pcid(asid),
		  (unsigned long *)this_cpu_ptr(&cpu_tlbstate.user_pcid_flush_mask));
}
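/*
 * For example (with PTI and PCID enabled): invalidate_user_asid(1) sets
 * bit kern_pcid(1) == 2 in this CPU's user_pcid_flush_mask; the entry
 * code later tests and clears that bit and performs a flushing CR3 write
 * for that user PCID the next time it switches to userspace.
 */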

/*
 * flush the entire current user mapping
 */
static inline void __native_flush_tlb(void)
{
	/*
	 * Preemption or interrupts must be disabled to protect the access
	 * to the per CPU variable and to prevent being preempted between
	 * read_cr3() and write_cr3().
	 */
	WARN_ON_ONCE(preemptible());

	invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid));

	/* If current->mm == NULL then the read_cr3() "borrows" an mm */
	native_write_cr3(__native_read_cr3());
}

/*
 * flush everything
 */
static inline void __native_flush_tlb_global(void)
{
	unsigned long cr4, flags;

	if (static_cpu_has(X86_FEATURE_INVPCID)) {
		/*
		 * Using INVPCID is considerably faster than a pair of writes
		 * to CR4 sandwiched inside an IRQ flag save/restore.
		 *
		 * Note, this works with CR4.PCIDE=0 or 1.
		 */
		invpcid_flush_all();
		return;
	}

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts. (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	/* toggle PGE; this flushes all TLB entries, including globals */
	native_write_cr4(cr4 ^ X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	native_write_cr4(cr4);

	raw_local_irq_restore(flags);
}

/*
 * flush one page in the user mapping
 */
static inline void __native_flush_tlb_single(unsigned long addr)
{
	u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);

	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");

	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	/*
	 * Some platforms #GP if we call invpcid(type=1/2) before CR4.PCIDE=1.
	 * Just use invalidate_user_asid() in case we are called early.
	 */
	if (!this_cpu_has(X86_FEATURE_INVPCID_SINGLE))
		invalidate_user_asid(loaded_mm_asid);
	else
		invpcid_flush_one(user_pcid(loaded_mm_asid), addr);
}

/*
 * flush everything
 */
static inline void __flush_tlb_all(void)
{
	if (boot_cpu_has(X86_FEATURE_PGE)) {
		__flush_tlb_global();
	} else {
		/*
		 * !PGE -> !PCID (setup_pcid()), thus every flush is total.
		 */
		__flush_tlb();
	}
}

/*
 * flush one page in the kernel mapping
 */
static inline void __flush_tlb_one(unsigned long addr)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
	__flush_tlb_single(addr);

	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	/*
	 * __flush_tlb_single() will have cleared the TLB entry for this ASID,
	 * but since kernel space is replicated across all ASIDs, we must also
	 * invalidate all the others.
	 */
	invalidate_other_asid();
}

#define TLB_FLUSH_ALL	-1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, info) flushes TLBs on other cpus
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
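/*
 * Illustrative usage (a sketch, not an addition to the API): after
 * changing PTEs for a VMA, a caller would typically do
 *
 *	flush_tlb_range(vma, start, end);
 *
 * which expands to flush_tlb_mm_range(vma->vm_mm, start, end,
 * vma->vm_flags) and IPIs the other CPUs in mm_cpumask(vma->vm_mm)
 * as needed.
 */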
struct flush_tlb_info {
	/*
	 * We support several kinds of flushes.
	 *
	 * - Fully flush a single mm.  .mm will be set, .end will be
	 *   TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to
	 *   which the IPI sender is trying to catch us up.
	 *
	 * - Partially flush a single mm.  .mm will be set, .start and
	 *   .end will indicate the range, and .new_tlb_gen will be set
	 *   such that the changes between generation .new_tlb_gen-1 and
	 *   .new_tlb_gen are entirely contained in the indicated range.
	 *
	 * - Fully flush all mms whose tlb_gens have been updated.  .mm
	 *   will be NULL, .end will be TLB_FLUSH_ALL, and .new_tlb_gen
	 *   will be zero.
	 */
	struct mm_struct	*mm;
	unsigned long		start;
	unsigned long		end;
	u64			new_tlb_gen;
};
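/*
 * Roughly how a partial flush gets described (an illustrative sketch of
 * what arch/x86/mm/tlb.c does, not a definition in this header):
 *
 *	struct flush_tlb_info info = {
 *		.mm          = mm,
 *		.start       = start,
 *		.end         = end,
 *		.new_tlb_gen = inc_mm_tlb_gen(mm),
 *	};
 *	flush_tlb_others(mm_cpumask(mm), &info);
 */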

#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end)	\
		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)

extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
			     const struct flush_tlb_info *info);

static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
	/*
	 * Bump the generation count.  This also serves as a full barrier
	 * that synchronizes with switch_mm(): callers are required to order
	 * their read of mm_cpumask after their writes to the paging
	 * structures.
	 */
	return atomic64_inc_return(&mm->context.tlb_gen);
}

static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
					struct mm_struct *mm)
{
	inc_mm_tlb_gen(mm);
	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, info)	\
	native_flush_tlb_others(mask, info)
#endif

#endif /* _ASM_X86_TLBFLUSH_H */