/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_NOSPEC_BRANCH_H_
#define _ASM_X86_NOSPEC_BRANCH_H_

#include <asm/alternative.h>
#include <asm/alternative-asm.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>

/*
 * Fill the CPU return stack buffer.
 *
 * Each entry in the RSB, if used for a speculative 'ret', contains an
 * infinite 'pause; lfence; jmp' loop to capture speculative execution.
 *
 * This is required in various cases for retpoline and IBRS-based
 * mitigations for the Spectre variant 2 vulnerability. Sometimes to
 * eliminate potentially bogus entries from the RSB, and sometimes
 * purely to ensure that it doesn't get empty, which on some CPUs would
 * allow predictions from other (unwanted!) sources to be used.
 *
 * We define a CPP macro such that it can be used from both .S files and
 * inline assembly. It's possible to do a .macro and then include that
 * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
 */

#define RSB_CLEAR_LOOPS		32	/* To forcibly overwrite all entries */
#define RSB_FILL_LOOPS		16	/* To avoid underflow */

/*
 * Google experimented with loop-unrolling and this turned out to be the
 * optimal version: two calls, each with their own speculation trap should
 * their return address get used, in a loop.
 */
#define __FILL_RETURN_BUFFER(reg, nr, sp)	\
	mov	$(nr/2), reg;			\
771:						\
	call	772f;				\
773:	/* speculation trap */			\
	pause;					\
	lfence;					\
	jmp	773b;				\
772:						\
	call	774f;				\
775:	/* speculation trap */			\
	pause;					\
	lfence;					\
	jmp	775b;				\
774:						\
	dec	reg;				\
	jnz	771b;				\
	add	$(BITS_PER_LONG/8) * nr, sp;
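
/*
 * Worked example (assuming a 64-bit build, so BITS_PER_LONG/8 == 8):
 * instantiating the macro with nr == RSB_CLEAR_LOOPS (32) runs the loop
 * body 16 times and issues 32 calls, stuffing 32 RSB entries that all
 * point at speculation traps.  The final 'add $256, sp' then discards
 * the 32 bogus return addresses those calls left on the stack.
 */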

#ifdef __ASSEMBLY__

/*
 * This should be used immediately before a retpoline alternative.  It tells
 * objtool where the retpolines are so that it can make sense of the control
 * flow by just reading the original instruction(s) and ignoring the
 * alternatives.
 */
.macro ANNOTATE_NOSPEC_ALTERNATIVE
	.Lannotate_\@:
	.pushsection .discard.nospec
	.long .Lannotate_\@ - .
	.popsection
.endm

/*
 * This should be used immediately before an indirect jump/call. It tells
 * objtool the subsequent indirect jump/call is vouched safe for retpoline
 * builds.
 */
.macro ANNOTATE_RETPOLINE_SAFE
	.Lannotate_\@:
	.pushsection .discard.retpoline_safe
	_ASM_PTR .Lannotate_\@
	.popsection
.endm
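
/*
 * Typical use in a .S file (a sketch; 'some_func_ptr' is only a
 * placeholder symbol):
 *
 *	ANNOTATE_RETPOLINE_SAFE
 *	call	*some_func_ptr(%rip)
 *
 * i.e. the annotation immediately precedes the indirect jump/call it
 * vouches for.
 */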

/*
 * These are the bare retpoline primitives for indirect jmp and call.
 * Do not use these directly; they only exist to make the ALTERNATIVE
 * invocation below less ugly.
 */
.macro RETPOLINE_JMP reg:req
	call	.Ldo_rop_\@
.Lspec_trap_\@:
	pause
	lfence
	jmp	.Lspec_trap_\@
.Ldo_rop_\@:
	mov	\reg, (%_ASM_SP)	/* overwrite return address with \reg */
	ret				/* "return" to \reg; speculation hits the trap */
.endm

/*
 * This is a wrapper around RETPOLINE_JMP so the called function in reg
 * returns to the instruction after the macro.
 */
.macro RETPOLINE_CALL reg:req
	jmp	.Ldo_call_\@
.Ldo_retpoline_jmp_\@:
	RETPOLINE_JMP \reg
.Ldo_call_\@:
	call	.Ldo_retpoline_jmp_\@
.endm
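
/*
 * Spelled out, the wrapper above works as follows: the 'jmp' skips to
 * .Ldo_call_\@, whose 'call' pushes the desired return address (the
 * instruction following the macro) and enters .Ldo_retpoline_jmp_\@,
 * which RETPOLINE_JMPs to \reg.  When the called function returns, it
 * therefore lands directly after the macro.
 */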

/*
 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
 * indirect jmp/call which may be susceptible to the Spectre variant 2
 * attack.
 */
.macro JMP_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	ANNOTATE_NOSPEC_ALTERNATIVE
	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *\reg),	\
		__stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE,	\
		__stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
#else
	jmp	*\reg
#endif
.endm

.macro CALL_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	ANNOTATE_NOSPEC_ALTERNATIVE
	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *\reg),	\
		__stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\
		__stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *\reg), X86_FEATURE_RETPOLINE_AMD
#else
	call	*\reg
#endif
.endm
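
/*
 * Usage sketch: wherever a .S file would otherwise write 'jmp *%rax' or
 * 'call *%rdi', it can use
 *
 *	JMP_NOSPEC %rax
 *	CALL_NOSPEC %rdi
 *
 * instead; both degrade to the plain indirect jmp/call when
 * CONFIG_RETPOLINE is not set.
 */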

/*
 * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
 * monstrosity above manually.
 */
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
#ifdef CONFIG_RETPOLINE
	ANNOTATE_NOSPEC_ALTERNATIVE
	ALTERNATIVE "jmp .Lskip_rsb_\@",				\
		__stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP))	\
		\ftr
.Lskip_rsb_\@:
#endif
.endm
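
/*
 * Usage sketch (the register must be one the caller may clobber, and the
 * feature bit is whatever gates RSB stuffing at that call site, e.g.
 * X86_FEATURE_RSB_CTXSW on the context-switch path):
 *
 *	FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
 */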

#else /* __ASSEMBLY__ */

#define ANNOTATE_NOSPEC_ALTERNATIVE				\
	"999:\n\t"						\
	".pushsection .discard.nospec\n\t"			\
	".long 999b - .\n\t"					\
	".popsection\n\t"

#define ANNOTATE_RETPOLINE_SAFE					\
	"999:\n\t"						\
	".pushsection .discard.retpoline_safe\n\t"		\
	_ASM_PTR " 999b\n\t"					\
	".popsection\n\t"

#ifdef CONFIG_RETPOLINE
#ifdef CONFIG_X86_64

/*
 * Inline asm uses the %V modifier, which is only available in newer
 * GCC; a new enough compiler is guaranteed when CONFIG_RETPOLINE is
 * defined.
 */
# define CALL_NOSPEC						\
	ANNOTATE_NOSPEC_ALTERNATIVE				\
	ALTERNATIVE_2(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"call __x86_indirect_thunk_%V[thunk_target]\n",		\
	X86_FEATURE_RETPOLINE,					\
	"lfence;\n"						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	X86_FEATURE_RETPOLINE_AMD)
# define THUNK_TARGET(addr) [thunk_target] "r" (addr)

#else /* CONFIG_X86_32 */
/*
 * For i386 we use the original ret-equivalent retpoline, because
 * otherwise we'll run out of registers. We don't care about CET
 * here, anyway.
 */
# define CALL_NOSPEC						\
	ANNOTATE_NOSPEC_ALTERNATIVE				\
	ALTERNATIVE_2(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"       jmp    904f;\n"					\
	"       .align 16\n"					\
	"901:	call   903f;\n"					\
	"902:	pause;\n"					\
	"    	lfence;\n"					\
	"       jmp    902b;\n"					\
	"       .align 16\n"					\
	"903:	addl   $4, %%esp;\n"				\
	"       pushl  %[thunk_target];\n"			\
	"       ret;\n"						\
	"       .align 16\n"					\
	"904:	call   901b;\n",				\
	X86_FEATURE_RETPOLINE,					\
	"lfence;\n"						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	X86_FEATURE_RETPOLINE_AMD)

# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
#else /* No retpoline for C / inline asm */
# define CALL_NOSPEC "call *%[thunk_target]\n"
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
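
/*
 * Minimal usage sketch from C (the function pointer 'fn' and output 'ret'
 * are placeholders; real callers also list their argument registers and
 * clobbers as usual):
 *
 *	asm volatile(CALL_NOSPEC
 *		     : "=a" (ret), ASM_CALL_CONSTRAINT
 *		     : THUNK_TARGET(fn)
 *		     : "memory");
 */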

/* The Spectre V2 mitigation variants */
enum spectre_v2_mitigation {
	SPECTRE_V2_NONE,
	SPECTRE_V2_RETPOLINE_GENERIC,
	SPECTRE_V2_RETPOLINE_AMD,
	SPECTRE_V2_IBRS_ENHANCED,
};

/* The Speculative Store Bypass disable variants */
enum ssb_mitigation {
	SPEC_STORE_BYPASS_NONE,
	SPEC_STORE_BYPASS_DISABLE,
	SPEC_STORE_BYPASS_PRCTL,
	SPEC_STORE_BYPASS_SECCOMP,
};

extern char __indirect_thunk_start[];
extern char __indirect_thunk_end[];

/*
 * On VMEXIT we must ensure that no RSB predictions learned in the guest
 * can be followed in the host, by overwriting the RSB completely. Both
 * retpoline and IBRS mitigations for Spectre v2 need this; only on future
 * CPUs with IBRS_ALL *might* it be avoided.
 */
static inline void vmexit_fill_RSB(void)
{
#ifdef CONFIG_RETPOLINE
	unsigned long loops;

	asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
		      ALTERNATIVE("jmp 910f",
				  __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
				  X86_FEATURE_RETPOLINE)
		      "910:"
		      : "=r" (loops), ASM_CALL_CONSTRAINT
		      : : "memory" );
#endif
}

static __always_inline
void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
{
	asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
		: : "c" (msr),
		    "a" ((u32)val),
		    "d" ((u32)(val >> 32)),
		    [feature] "i" (feature)
		: "memory");
}

static inline void indirect_branch_prediction_barrier(void)
{
	u64 val = PRED_CMD_IBPB;

	alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
}

/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;

/*
 * With retpoline, we must use IBRS to restrict branch prediction
 * before calling into firmware.
 *
 * (Implemented as CPP macros due to header hell.)
 */
#define firmware_restrict_branch_speculation_start()			\
do {									\
	u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS;			\
									\
	preempt_disable();						\
	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
			      X86_FEATURE_USE_IBRS_FW);			\
} while (0)

#define firmware_restrict_branch_speculation_end()			\
do {									\
	u64 val = x86_spec_ctrl_base;					\
									\
	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
			      X86_FEATURE_USE_IBRS_FW);			\
	preempt_enable();						\
} while (0)
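
/*
 * Intended usage pattern (a sketch; 'efi_fn' stands in for an actual
 * firmware call):
 *
 *	firmware_restrict_branch_speculation_start();
 *	status = efi_fn(args);
 *	firmware_restrict_branch_speculation_end();
 *
 * so that IBRS is set, and preemption disabled, only for the duration of
 * the firmware call itself.
 */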

#endif /* __ASSEMBLY__ */

/*
 * The macros below are used by the eBPF JIT compiler to emit the byte
 * sequence for the following assembly:
 *
 * With retpolines configured:
 *
 *    callq do_rop
 *  spec_trap:
 *    pause
 *    lfence
 *    jmp spec_trap
 *  do_rop:
 *    mov %rax,(%rsp) for x86_64
 *    mov %edx,(%esp) for x86_32
 *    retq
 *
 * Without retpolines configured:
 *
 *    jmp *%rax for x86_64
 *    jmp *%edx for x86_32
 */
#ifdef CONFIG_RETPOLINE
# ifdef CONFIG_X86_64
#  define RETPOLINE_RAX_BPF_JIT_SIZE	17
#  define RETPOLINE_RAX_BPF_JIT()				\
do {								\
	EMIT1_off32(0xE8, 7);	 /* callq do_rop */		\
	/* spec_trap: */					\
	EMIT2(0xF3, 0x90);       /* pause */			\
	EMIT3(0x0F, 0xAE, 0xE8); /* lfence */			\
	EMIT2(0xEB, 0xF9);       /* jmp spec_trap */		\
	/* do_rop: */						\
	EMIT4(0x48, 0x89, 0x04, 0x24); /* mov %rax,(%rsp) */	\
	EMIT1(0xC3);             /* retq */			\
} while (0)
# else /* !CONFIG_X86_64 */
#  define RETPOLINE_EDX_BPF_JIT()				\
do {								\
	EMIT1_off32(0xE8, 7);	 /* call do_rop */		\
	/* spec_trap: */					\
	EMIT2(0xF3, 0x90);       /* pause */			\
	EMIT3(0x0F, 0xAE, 0xE8); /* lfence */			\
	EMIT2(0xEB, 0xF9);       /* jmp spec_trap */		\
	/* do_rop: */						\
	EMIT3(0x89, 0x14, 0x24); /* mov %edx,(%esp) */		\
	EMIT1(0xC3);             /* ret */			\
} while (0)
# endif
#else /* !CONFIG_RETPOLINE */
# ifdef CONFIG_X86_64
#  define RETPOLINE_RAX_BPF_JIT_SIZE	2
#  define RETPOLINE_RAX_BPF_JIT()				\
	EMIT2(0xFF, 0xE0);       /* jmp *%rax */
# else /* !CONFIG_X86_64 */
#  define RETPOLINE_EDX_BPF_JIT()				\
	EMIT2(0xFF, 0xE2)        /* jmp *%edx */
# endif
#endif
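
/*
 * The JIT is expected to invoke these where it would otherwise emit a
 * bare indirect jump through %rax/%edx, e.g. (a sketch):
 *
 *	RETPOLINE_RAX_BPF_JIT();
 *
 * RETPOLINE_RAX_BPF_JIT_SIZE gives the emitted length so that branch
 * offsets over the sequence can be computed without caring which of the
 * two variants is configured.
 */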

#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */