Commit 0c60eeb4 authored by Peter Zijlstra, committed by Zheng Zengkai

x86/retbleed: Add fine grained Kconfig knobs

stable inclusion
from stable-v5.10.133
commit b24fdd0f1c3328cf8ee0c518b93a7187f8cee097
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I5PTAS
CVE: CVE-2022-29900, CVE-2022-23816, CVE-2022-29901

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=b24fdd0f1c3328cf8ee0c518b93a7187f8cee097

--------------------------------

commit f43b9876 upstream.

Do fine-grained Kconfig for all the various retbleed parts.

NOTE: if your compiler doesn't support return thunks this will
silently 'upgrade' your mitigation to IBPB, you might not like this.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
[cascardo: there is no CONFIG_OBJTOOL]
[cascardo: objtool calling and option parsing has changed]
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
[bwh: Backported to 5.10:
 - In scripts/Makefile.build, add the objtool option with an ifdef
   block, same as for other options
 - Adjust filename, context]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

conflict:
	arch/x86/include/asm/disabled-features.h
Signed-off-by: Lin Yujun <linyujun809@huawei.com>
Reviewed-by: Zhang Jianhua <chris.zjh@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 000942fb
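The NOTE above refers to the fallback the retbleed_select_mitigation() hunks further down implement: on AMD/Hygon, when the unret mitigation cannot be built (no return-thunk capable compiler, hence no CONFIG_CPU_UNRET_ENTRY), the auto path falls through to the IBPB mitigation. A minimal user-space sketch of that selection order, not part of the patch; HAVE_UNRET_ENTRY and HAVE_IBPB_ENTRY are hypothetical stand-ins for the kernel's IS_ENABLED(CONFIG_CPU_UNRET_ENTRY) / IS_ENABLED(CONFIG_CPU_IBPB_ENTRY) tests:

#include <stdio.h>

#define HAVE_UNRET_ENTRY 0	/* e.g. compiler lacks -mfunction-return=thunk-extern */
#define HAVE_IBPB_ENTRY  1

enum retbleed_mitigation { RETBLEED_NONE, RETBLEED_UNRET, RETBLEED_IBPB };

/* Sketch of the auto-selection order on AMD/Hygon: prefer unret, else IBPB. */
static enum retbleed_mitigation pick_retbleed_auto(void)
{
	if (HAVE_UNRET_ENTRY)
		return RETBLEED_UNRET;
	if (HAVE_IBPB_ENTRY)
		return RETBLEED_IBPB;	/* the silent 'upgrade' the NOTE warns about */
	return RETBLEED_NONE;
}

int main(void)
{
	printf("auto-selected retbleed mitigation: %d\n", pick_retbleed_auto());
	return 0;
}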
@@ -672,14 +672,18 @@ endif
ifdef CONFIG_CC_IS_GCC
RETPOLINE_CFLAGS := $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register)
RETPOLINE_CFLAGS += $(call cc-option,-mfunction-return=thunk-extern)
RETPOLINE_VDSO_CFLAGS := $(call cc-option,-mindirect-branch=thunk-inline -mindirect-branch-register)
endif
ifdef CONFIG_CC_IS_CLANG
RETPOLINE_CFLAGS := -mretpoline-external-thunk
RETPOLINE_VDSO_CFLAGS := -mretpoline
RETPOLINE_CFLAGS += $(call cc-option,-mfunction-return=thunk-extern)
endif
ifdef CONFIG_RETHUNK
RETHUNK_CFLAGS := -mfunction-return=thunk-extern
RETPOLINE_CFLAGS += $(RETHUNK_CFLAGS)
endif
export RETPOLINE_CFLAGS
export RETPOLINE_VDSO_CFLAGS
......
@@ -461,30 +461,6 @@ config GOLDFISH
def_bool y
depends on X86_GOLDFISH
config RETPOLINE
bool "Avoid speculative indirect branches in kernel"
default y
help
Compile kernel with the retpoline compiler options to guard against
kernel-to-user data leaks by avoiding speculative indirect
branches. Requires a compiler with -mindirect-branch=thunk-extern
support for full protection. The kernel may run slower.
config CC_HAS_SLS
def_bool $(cc-option,-mharden-sls=all)
config CC_HAS_RETURN_THUNK
def_bool $(cc-option,-mfunction-return=thunk-extern)
config SLS
bool "Mitigate Straight-Line-Speculation"
depends on CC_HAS_SLS && X86_64
default n
help
Compile the kernel with straight-line-speculation options to guard
against straight line speculation. The kernel image might be slightly
larger.
config X86_CPU_RESCTRL
bool "x86 CPU resource control support"
depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD)
@@ -2471,6 +2447,88 @@ source "kernel/livepatch/Kconfig"
endmenu
config CC_HAS_SLS
def_bool $(cc-option,-mharden-sls=all)
config CC_HAS_RETURN_THUNK
def_bool $(cc-option,-mfunction-return=thunk-extern)
menuconfig SPECULATION_MITIGATIONS
bool "Mitigations for speculative execution vulnerabilities"
default y
help
Say Y here to enable options which enable mitigations for
speculative execution hardware vulnerabilities.
If you say N, all mitigations will be disabled. You really
should know what you are doing to say so.
if SPECULATION_MITIGATIONS
config PAGE_TABLE_ISOLATION
bool "Remove the kernel mapping in user mode"
default y
depends on (X86_64 || X86_PAE)
help
This feature reduces the number of hardware side channels by
ensuring that the majority of kernel addresses are not mapped
into userspace.
See Documentation/x86/pti.rst for more details.
config RETPOLINE
bool "Avoid speculative indirect branches in kernel"
default y
help
Compile kernel with the retpoline compiler options to guard against
kernel-to-user data leaks by avoiding speculative indirect
branches. Requires a compiler with -mindirect-branch=thunk-extern
support for full protection. The kernel may run slower.
config RETHUNK
bool "Enable return-thunks"
depends on RETPOLINE && CC_HAS_RETURN_THUNK
default y
help
Compile the kernel with the return-thunks compiler option to guard
against kernel-to-user data leaks by avoiding return speculation.
Requires a compiler with -mfunction-return=thunk-extern
support for full protection. The kernel may run slower.
config CPU_UNRET_ENTRY
bool "Enable UNRET on kernel entry"
depends on CPU_SUP_AMD && RETHUNK
default y
help
Compile the kernel with support for the retbleed=unret mitigation.
config CPU_IBPB_ENTRY
bool "Enable IBPB on kernel entry"
depends on CPU_SUP_AMD
default y
help
Compile the kernel with support for the retbleed=ibpb mitigation.
config CPU_IBRS_ENTRY
bool "Enable IBRS on kernel entry"
depends on CPU_SUP_INTEL
default y
help
Compile the kernel with support for the spectre_v2=ibrs mitigation.
This mitigates both spectre_v2 and retbleed at great cost to
performance.
config SLS
bool "Mitigate Straight-Line-Speculation"
depends on CC_HAS_SLS && X86_64
default n
help
Compile the kernel with straight-line-speculation options to guard
against straight line speculation. The kernel image might be slightly
larger.
endif
config ARCH_HAS_ADD_PAGES
def_bool y
depends on X86_64 && ARCH_ENABLE_MEMORY_HOTPLUG
......
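The new knobs above (RETHUNK, CPU_UNRET_ENTRY, CPU_IBPB_ENTRY, CPU_IBRS_ENTRY) are consumed two ways in the rest of the patch: as #ifdef guards in headers and assembly macros, and as IS_ENABLED() tests in C that fold to compile-time constants. As a refresher, here is a simplified, self-contained re-implementation of the IS_ENABLED() trick; the real one lives in the kernel's kconfig.h, and the CONFIG_ defines below merely stand in for what the generated autoconf.h would contain:

/* Simplified sketch of IS_ENABLED(): enabled bool options are defined to 1
 * by the build system, disabled ones are not defined at all, yet both cases
 * must evaluate to a plain 0/1 constant usable inside an if (). */
#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)
#define IS_ENABLED(option) __is_defined(option)

#define CONFIG_CPU_IBRS_ENTRY 1	/* pretend autoconf.h enabled this */
/* CONFIG_CPU_UNRET_ENTRY deliberately left undefined (option is off) */

int main(void)
{
	printf("IBRS entry support built in:  %d\n", IS_ENABLED(CONFIG_CPU_IBRS_ENTRY));
	printf("UNRET entry support built in: %d\n", IS_ENABLED(CONFIG_CPU_UNRET_ENTRY));
	return 0;
}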
@@ -323,6 +323,7 @@ For 32-bit we have the following conventions - kernel is built with
* Assumes x86_spec_ctrl_{base,current} to have SPEC_CTRL_IBRS set.
*/
.macro IBRS_ENTER save_reg
#ifdef CONFIG_CPU_IBRS_ENTRY
ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
movl $MSR_IA32_SPEC_CTRL, %ecx
@@ -343,6 +344,7 @@ For 32-bit we have the following conventions - kernel is built with
shr $32, %rdx
wrmsr
.Lend_\@:
#endif
.endm
/*
@@ -350,6 +352,7 @@ For 32-bit we have the following conventions - kernel is built with
* regs. Must be called after the last RET.
*/
.macro IBRS_EXIT save_reg
#ifdef CONFIG_CPU_IBRS_ENTRY
ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
movl $MSR_IA32_SPEC_CTRL, %ecx
@@ -364,6 +367,7 @@ For 32-bit we have the following conventions - kernel is built with
shr $32, %rdx
wrmsr
.Lend_\@:
#endif
.endm
/*
......
@@ -60,9 +60,19 @@
# define DISABLE_RETPOLINE 0
#else
# define DISABLE_RETPOLINE ((1 << (X86_FEATURE_RETPOLINE & 31)) | \
(1 << (X86_FEATURE_RETPOLINE_LFENCE & 31)) | \
(1 << (X86_FEATURE_RETHUNK & 31)) | \
(1 << (X86_FEATURE_UNRET & 31)))
(1 << (X86_FEATURE_RETPOLINE_LFENCE & 31)))
#endif
#ifdef CONFIG_RETHUNK
# define DISABLE_RETHUNK 0
#else
# define DISABLE_RETHUNK (1 << (X86_FEATURE_RETHUNK & 31))
#endif
#ifdef CONFIG_CPU_UNRET_ENTRY
# define DISABLE_UNRET 0
#else
# define DISABLE_UNRET (1 << (X86_FEATURE_UNRET & 31))
#endif
/* Force disable because it's broken beyond repair */
@@ -88,7 +98,7 @@
#define DISABLED_MASK8 0
#define DISABLED_MASK9 (DISABLE_SMAP|DISABLE_SGX)
#define DISABLED_MASK10 0
#define DISABLED_MASK11 (DISABLE_RETPOLINE)
#define DISABLED_MASK11 (DISABLE_RETPOLINE|DISABLE_RETHUNK|DISABLE_UNRET)
#define DISABLED_MASK12 0
#define DISABLED_MASK13 0
#define DISABLED_MASK14 0
......
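The new DISABLE_RETHUNK / DISABLE_UNRET bits feed DISABLED_MASK11 so that, when the corresponding Kconfig option is off, a cpu_feature_enabled() check on X86_FEATURE_RETHUNK or X86_FEATURE_UNRET becomes a compile-time false and the dependent code paths drop out of the build. A simplified sketch of that mechanism, not the kernel's real cpufeature.h; the bit number and function name below are hypothetical:

#include <stdbool.h>
#include <stdio.h>

#define BIT(n)			(1u << (n))
#define FEATURE_BIT_UNRET	5			/* hypothetical bit within word 11 */
#define DISABLED_MASK11		BIT(FEATURE_BIT_UNRET)	/* CONFIG_CPU_UNRET_ENTRY=n */

/* Sketch of a cpu_feature_enabled()-style check for one capability word:
 * the DISABLED_MASK test is a compile-time constant, so when the bit is
 * masked off the whole function folds to "return false". */
static bool feature_enabled_word11(unsigned int bit, unsigned int runtime_caps)
{
	if (DISABLED_MASK11 & BIT(bit))
		return false;
	return runtime_caps & BIT(bit);
}

int main(void)
{
	/* Even if the runtime capability word claims UNRET, the build says no. */
	printf("UNRET usable: %d\n", feature_enabled_word11(FEATURE_BIT_UNRET, ~0u));
	return 0;
}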
@@ -18,7 +18,7 @@
#define __ALIGN_STR __stringify(__ALIGN)
#endif
#if defined(CONFIG_RETPOLINE) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
#if defined(CONFIG_RETHUNK) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
#define RET jmp __x86_return_thunk
#else /* CONFIG_RETPOLINE */
#ifdef CONFIG_SLS
@@ -30,7 +30,7 @@
#else /* __ASSEMBLY__ */
#if defined(CONFIG_RETPOLINE) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
#if defined(CONFIG_RETHUNK) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
#define ASM_RET "jmp __x86_return_thunk\n\t"
#else /* CONFIG_RETPOLINE */
#ifdef CONFIG_SLS
......
@@ -127,6 +127,12 @@
.Lskip_rsb_\@:
.endm
#ifdef CONFIG_CPU_UNRET_ENTRY
#define CALL_ZEN_UNTRAIN_RET "call zen_untrain_ret"
#else
#define CALL_ZEN_UNTRAIN_RET ""
#endif
/*
* Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the
* return thunk isn't mapped into the userspace tables (then again, AMD
@@ -139,10 +145,10 @@
* where we have a stack but before any RET instruction.
*/
.macro UNTRAIN_RET
#ifdef CONFIG_RETPOLINE
#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY)
ANNOTATE_UNRET_END
ALTERNATIVE_2 "", \
"call zen_untrain_ret", X86_FEATURE_UNRET, \
CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \
"call entry_ibpb", X86_FEATURE_ENTRY_IBPB
#endif
.endm
......
@@ -44,7 +44,7 @@
#define ARCH_DEFINE_STATIC_CALL_TRAMP(name, func) \
__ARCH_DEFINE_STATIC_CALL_TRAMP(name, ".byte 0xe9; .long " #func " - (. + 4)")
#ifdef CONFIG_RETPOLINE
#ifdef CONFIG_RETHUNK
#define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name) \
__ARCH_DEFINE_STATIC_CALL_TRAMP(name, "jmp __x86_return_thunk")
#else
......
@@ -612,6 +612,7 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
}
}
#ifdef CONFIG_RETHUNK
/*
* Rewrite the compiler generated return thunk tail-calls.
*
@@ -673,6 +674,10 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end)
}
}
}
#else
void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
#endif /* CONFIG_RETHUNK */
#else /* !RETPOLINES || !CONFIG_STACK_VALIDATION */
void __init_or_module noinline apply_retpolines(s32 *start, s32 *end) { }
......
@@ -916,6 +916,7 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
void init_spectral_chicken(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_CPU_UNRET_ENTRY
u64 value;
/*
@@ -932,6 +933,7 @@ void init_spectral_chicken(struct cpuinfo_x86 *c)
wrmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value);
}
}
#endif
}
static void init_amd_zn(struct cpuinfo_x86 *c)
......
@@ -798,7 +798,6 @@ static int __init retbleed_parse_cmdline(char *str)
early_param("retbleed", retbleed_parse_cmdline);
#define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
#define RETBLEED_COMPILER_MSG "WARNING: kernel not compiled with RETPOLINE or -mfunction-return capable compiler; falling back to IBPB!\n"
#define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"
static void __init retbleed_select_mitigation(void)
@@ -813,18 +812,33 @@ static void __init retbleed_select_mitigation(void)
return;
case RETBLEED_CMD_UNRET:
retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY)) {
retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
} else {
pr_err("WARNING: kernel not compiled with CPU_UNRET_ENTRY.\n");
goto do_cmd_auto;
}
break;
case RETBLEED_CMD_IBPB:
retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) {
retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
} else {
pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n");
goto do_cmd_auto;
}
break;
do_cmd_auto:
case RETBLEED_CMD_AUTO:
default:
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY))
retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY))
retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
}
/*
* The Intel mitigation (IBRS or eIBRS) was already selected in
@@ -837,14 +851,6 @@ static void __init retbleed_select_mitigation(void)
switch (retbleed_mitigation) {
case RETBLEED_MITIGATION_UNRET:
if (!IS_ENABLED(CONFIG_RETPOLINE) ||
!IS_ENABLED(CONFIG_CC_HAS_RETURN_THUNK)) {
pr_err(RETBLEED_COMPILER_MSG);
retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
goto retbleed_force_ibpb;
}
setup_force_cpu_cap(X86_FEATURE_RETHUNK);
setup_force_cpu_cap(X86_FEATURE_UNRET);
@@ -856,7 +862,6 @@ static void __init retbleed_select_mitigation(void)
break;
case RETBLEED_MITIGATION_IBPB:
retbleed_force_ibpb:
setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
mitigate_smt = true;
break;
@@ -1227,6 +1232,12 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
return SPECTRE_V2_CMD_AUTO;
}
if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_CPU_IBRS_ENTRY)) {
pr_err("%s selected but not compiled in. Switching to AUTO select\n",
mitigation_options[i].option);
return SPECTRE_V2_CMD_AUTO;
}
if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
pr_err("%s selected but not Intel CPU. Switching to AUTO select\n",
mitigation_options[i].option);
@@ -1284,7 +1295,8 @@ static void __init spectre_v2_select_mitigation(void)
break;
}
if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
if (IS_ENABLED(CONFIG_CPU_IBRS_ENTRY) &&
boot_cpu_has_bug(X86_BUG_RETBLEED) &&
retbleed_cmd != RETBLEED_CMD_OFF &&
boot_cpu_has(X86_FEATURE_IBRS) &&
boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
......
@@ -121,7 +121,7 @@ void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
}
EXPORT_SYMBOL_GPL(arch_static_call_transform);
#ifdef CONFIG_RETPOLINE
#ifdef CONFIG_RETHUNK
/*
* This is called by apply_returns() to fix up static call trampolines,
* specifically ARCH_DEFINE_STATIC_CALL_NULL_TRAMP which is recorded as
......
@@ -435,10 +435,10 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
* Depending on .config the SETcc functions look like:
*
* SETcc %al [3 bytes]
* RET | JMP __x86_return_thunk [1,5 bytes; CONFIG_RETPOLINE]
* RET | JMP __x86_return_thunk [1,5 bytes; CONFIG_RETHUNK]
* INT3 [1 byte; CONFIG_SLS]
*/
#define RET_LENGTH (1 + (4 * IS_ENABLED(CONFIG_RETPOLINE)) + \
#define RET_LENGTH (1 + (4 * IS_ENABLED(CONFIG_RETHUNK)) + \
IS_ENABLED(CONFIG_SLS))
#define SETCC_LENGTH (3 + RET_LENGTH)
#define SETCC_ALIGN (4 << ((SETCC_LENGTH > 4) & 1) << ((SETCC_LENGTH > 8) & 1))
......
@@ -71,6 +71,8 @@ SYM_CODE_END(__x86_indirect_thunk_array)
* This function name is magical and is used by -mfunction-return=thunk-extern
* for the compiler to generate JMPs to it.
*/
#ifdef CONFIG_RETHUNK
.section .text.__x86.return_thunk
/*
@@ -135,3 +137,5 @@ SYM_FUNC_END(zen_untrain_ret)
__EXPORT_THUNK(zen_untrain_ret)
EXPORT_SYMBOL(__x86_return_thunk)
#endif /* CONFIG_RETHUNK */
@@ -227,6 +227,9 @@ endif
ifdef CONFIG_RETPOLINE
objtool_args += --retpoline
endif
ifdef CONFIG_RETHUNK
objtool_args += --rethunk
endif
ifdef CONFIG_X86_SMAP
objtool_args += --uaccess
endif
......
@@ -65,7 +65,7 @@ objtool_link()
if [ -n "${CONFIG_VMLINUX_VALIDATION}" ]; then
objtoolopt="check"
if [ -n "${CONFIG_RETPOLINE}" ]; then
if [ -n "${CONFIG_CPU_UNRET_ENTRY}" ]; then
objtoolopt="${objtoolopt} --unret"
fi
if [ -z "${CONFIG_FRAME_POINTER}" ]; then
......
@@ -54,17 +54,6 @@ config SECURITY_NETWORK
implement socket and networking access controls.
If you are unsure how to answer this question, answer N.
config PAGE_TABLE_ISOLATION
bool "Remove the kernel mapping in user mode"
default y
depends on (X86_64 || X86_PAE) && !UML
help
This feature reduces the number of hardware side channels by
ensuring that the majority of kernel addresses are not mapped
into userspace.
See Documentation/x86/pti.rst for more details.
config SECURITY_INFINIBAND
bool "Infiniband Security Hooks"
depends on SECURITY && INFINIBAND
......
@@ -19,7 +19,7 @@
#include "objtool.h"
bool no_fp, no_unreachable, retpoline, module, backtrace, uaccess, stats,
validate_dup, vmlinux, sls, unret;
validate_dup, vmlinux, sls, unret, rethunk;
static const char * const check_usage[] = {
"objtool check [<options>] file.o",
@@ -30,6 +30,7 @@ const struct option check_options[] = {
OPT_BOOLEAN('f', "no-fp", &no_fp, "Skip frame pointer validation"),
OPT_BOOLEAN('u', "no-unreachable", &no_unreachable, "Skip 'unreachable instruction' warnings"),
OPT_BOOLEAN('r', "retpoline", &retpoline, "Validate retpoline assumptions"),
OPT_BOOLEAN(0, "rethunk", &rethunk, "validate and annotate rethunk usage"),
OPT_BOOLEAN(0, "unret", &unret, "validate entry unret placement"),
OPT_BOOLEAN('m', "module", &module, "Indicates the object will be part of a kernel module"),
OPT_BOOLEAN('b', "backtrace", &backtrace, "unwind on error"),
......
@@ -9,7 +9,7 @@
extern const struct option check_options[];
extern bool no_fp, no_unreachable, retpoline, module, backtrace, uaccess, stats,
validate_dup, vmlinux, sls, unret;
validate_dup, vmlinux, sls, unret, rethunk;
extern int cmd_check(int argc, const char **argv);
extern int cmd_orc(int argc, const char **argv);
......
@@ -3261,8 +3261,11 @@ static int validate_retpoline(struct objtool_file *file)
continue;
if (insn->type == INSN_RETURN) {
WARN_FUNC("'naked' return found in RETPOLINE build",
insn->sec, insn->offset);
if (rethunk) {
WARN_FUNC("'naked' return found in RETHUNK build",
insn->sec, insn->offset);
} else
continue;
} else {
WARN_FUNC("indirect %s found in RETPOLINE build",
insn->sec, insn->offset,
@@ -3532,7 +3535,9 @@ int check(struct objtool_file *file)
if (ret < 0)
goto out;
warnings += ret;
}
if (rethunk) {
ret = create_return_sites_sections(file);
if (ret < 0)
goto out;
......