Commit 05017fed authored by Linus Torvalds

Merge tag 'x86_urgent_for_v5.19_rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Borislav Petkov:
 "A couple more retbleed fallout fixes.

  It looks like their urgency is decreasing so it seems like we've
  managed to catch whatever snafus the limited -rc testing has exposed.
  Maybe we're getting ready... :)

   - Make retbleed mitigations 64-bit only (32-bit will need a bit more
     work, if it is even needed at all).

   - Prevent return-thunk patching of the LKDTM modules as it is not
     needed there

   - Avoid writing the SPEC_CTRL MSR on every kernel entry on eIBRS
     parts

   - Enhance error output of apply_returns() when it fails to patch a
     return thunk

   - A sparse fix to the sev-guest module

   - Protect EFI fw calls by issuing an IBPB on AMD"

* tag 'x86_urgent_for_v5.19_rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/speculation: Make all RETbleed mitigations 64-bit only
  lkdtm: Disable return thunks in rodata.c
  x86/bugs: Warn when "ibrs" mitigation is selected on Enhanced IBRS parts
  x86/alternative: Report missing return thunk details
  virt: sev-guest: Pass the appropriate argument type to iounmap()
  x86/amd: Use IBPB for firmware calls
@@ -2474,7 +2474,7 @@ config RETHUNK
 	bool "Enable return-thunks"
 	depends on RETPOLINE && CC_HAS_RETURN_THUNK
 	select OBJTOOL if HAVE_OBJTOOL
-	default y
+	default y if X86_64
 	help
 	  Compile the kernel with the return-thunks compiler option to guard
 	  against kernel-to-user data leaks by avoiding return speculation.
@@ -2483,21 +2483,21 @@ config RETHUNK

 config CPU_UNRET_ENTRY
 	bool "Enable UNRET on kernel entry"
-	depends on CPU_SUP_AMD && RETHUNK
+	depends on CPU_SUP_AMD && RETHUNK && X86_64
 	default y
 	help
 	  Compile the kernel with support for the retbleed=unret mitigation.

 config CPU_IBPB_ENTRY
 	bool "Enable IBPB on kernel entry"
-	depends on CPU_SUP_AMD
+	depends on CPU_SUP_AMD && X86_64
 	default y
 	help
 	  Compile the kernel with support for the retbleed=ibpb mitigation.

 config CPU_IBRS_ENTRY
 	bool "Enable IBRS on kernel entry"
-	depends on CPU_SUP_INTEL
+	depends on CPU_SUP_INTEL && X86_64
 	default y
 	help
 	  Compile the kernel with support for the spectre_v2=ibrs mitigation.
@@ -27,6 +27,7 @@ RETHUNK_CFLAGS		:= -mfunction-return=thunk-extern
 RETPOLINE_CFLAGS	+= $(RETHUNK_CFLAGS)
 endif

+export RETHUNK_CFLAGS
 export RETPOLINE_CFLAGS
 export RETPOLINE_VDSO_CFLAGS
@@ -302,6 +302,7 @@
 #define X86_FEATURE_RETPOLINE_LFENCE	(11*32+13) /* "" Use LFENCE for Spectre variant 2 */
 #define X86_FEATURE_RETHUNK		(11*32+14) /* "" Use REturn THUNK */
 #define X86_FEATURE_UNRET		(11*32+15) /* "" AMD BTB untrain return */
+#define X86_FEATURE_USE_IBPB_FW		(11*32+16) /* "" Use IBPB during runtime firmware calls */

 /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
 #define X86_FEATURE_AVX_VNNI		(12*32+ 4) /* AVX VNNI instructions */
@@ -297,6 +297,8 @@ do {									\
 	alternative_msr_write(MSR_IA32_SPEC_CTRL,			\
 			      spec_ctrl_current() | SPEC_CTRL_IBRS,	\
 			      X86_FEATURE_USE_IBRS_FW);			\
+	alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,	\
+			      X86_FEATURE_USE_IBPB_FW);			\
 } while (0)

 #define firmware_restrict_branch_speculation_end()		\
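The two alternative_msr_write() lines added above extend the firmware_restrict_branch_speculation_start() macro in nospec-branch.h: in addition to the SPEC_CTRL/IBRS write gated on X86_FEATURE_USE_IBRS_FW, an IBPB is now issued through MSR_IA32_PRED_CMD when X86_FEATURE_USE_IBPB_FW is set. As a rough sketch of how callers use these helpers (the function names below are illustrative, not taken from this series), a firmware call is bracketed like this:

#include <asm/nospec-branch.h>

extern int firmware_service_call(unsigned long arg);	/* hypothetical firmware entry */

/*
 * Illustrative sketch only: do_firmware_service() and firmware_service_call()
 * are made-up names. Real callers bracket the transfer to firmware with the
 * start/end helpers; the alternatives framework patches in the SPEC_CTRL
 * write and, with this series, the PRED_CMD/IBPB write only on CPUs where
 * the corresponding synthetic feature flags were force-set by bugs.c.
 */
static int do_firmware_service(unsigned long arg)
{
	int ret;

	firmware_restrict_branch_speculation_start();	/* IBRS and/or IBPB */
	ret = firmware_service_call(arg);
	firmware_restrict_branch_speculation_end();

	return ret;
}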
@@ -555,7 +555,9 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end)
 		dest = addr + insn.length + insn.immediate.value;

 		if (__static_call_fixup(addr, op, dest) ||
-		    WARN_ON_ONCE(dest != &__x86_return_thunk))
+		    WARN_ONCE(dest != &__x86_return_thunk,
+			      "missing return thunk: %pS-%pS: %*ph",
+			      addr, dest, 5, addr))
 			continue;

 		DPRINTK("return thunk at: %pS (%px) len: %d to: %pS",
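The switch from WARN_ON_ONCE() to WARN_ONCE() is what implements the "report missing return thunk details" item above: %pS resolves a kernel text address to symbol+offset, and %*ph hex-dumps a small buffer whose length is taken from the preceding int argument, so the warning now names the patch site, the unexpected jump target, and the first five bytes at the site. A minimal, illustrative use of the same format specifiers (the function name and message below are made up, not from the patch):

#include <linux/printk.h>

/* Illustrative only: demonstrates the %pS and %*ph printk extensions used above. */
static void report_unexpected_target(void *site, void *target)
{
	/* %pS -> "symbol+0xoff/0xsize"; %*ph -> hex dump, length from the preceding int */
	pr_warn("unexpected jump target: %pS-%pS: %*ph\n", site, target, 5, site);
}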
@@ -975,6 +975,7 @@ static inline const char *spectre_v2_module_string(void) { return ""; }
 #define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
 #define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
 #define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
+#define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"

 #ifdef CONFIG_BPF_SYSCALL
 void unpriv_ebpf_notify(int new_state)
@@ -1415,6 +1416,8 @@ static void __init spectre_v2_select_mitigation(void)

 	case SPECTRE_V2_IBRS:
 		setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
+		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
+			pr_warn(SPECTRE_V2_IBRS_PERF_MSG);
 		break;

 	case SPECTRE_V2_LFENCE:
@@ -1516,7 +1519,16 @@ static void __init spectre_v2_select_mitigation(void)
 	 * the CPU supports Enhanced IBRS, kernel might un-intentionally not
 	 * enable IBRS around firmware calls.
	 */
-	if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
+	if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
+	    (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+	     boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) {
+
+		if (retbleed_cmd != RETBLEED_CMD_IBPB) {
+			setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW);
+			pr_info("Enabling Speculation Barrier for firmware calls\n");
+		}
+
+	} else if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
 		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
 		pr_info("Enabling Restricted Speculation for firmware calls\n");
 	}
@@ -13,10 +13,13 @@ lkdtm-$(CONFIG_LKDTM)		+= cfi.o
 lkdtm-$(CONFIG_LKDTM)		+= fortify.o
 lkdtm-$(CONFIG_PPC_64S_HASH_MMU)	+= powerpc.o

-KASAN_SANITIZE_rodata.o		:= n
 KASAN_SANITIZE_stackleak.o	:= n
-KCOV_INSTRUMENT_rodata.o	:= n
-CFLAGS_REMOVE_rodata.o		+= $(CC_FLAGS_LTO)
+
+KASAN_SANITIZE_rodata.o		:= n
+KCSAN_SANITIZE_rodata.o		:= n
+KCOV_INSTRUMENT_rodata.o	:= n
+OBJECT_FILES_NON_STANDARD_rodata.o	:= y
+CFLAGS_REMOVE_rodata.o		+= $(CC_FLAGS_LTO) $(RETHUNK_CFLAGS)

 OBJCOPYFLAGS :=
 OBJCOPYFLAGS_rodata_objcopy.o := \
@@ -632,16 +632,19 @@ static int __init sev_guest_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct snp_guest_dev *snp_dev;
 	struct miscdevice *misc;
+	void __iomem *mapping;
 	int ret;

 	if (!dev->platform_data)
 		return -ENODEV;

 	data = (struct sev_guest_platform_data *)dev->platform_data;
-	layout = (__force void *)ioremap_encrypted(data->secrets_gpa, PAGE_SIZE);
-	if (!layout)
+	mapping = ioremap_encrypted(data->secrets_gpa, PAGE_SIZE);
+	if (!mapping)
 		return -ENODEV;

+	layout = (__force void *)mapping;
+
 	ret = -ENOMEM;
 	snp_dev = devm_kzalloc(&pdev->dev, sizeof(struct snp_guest_dev), GFP_KERNEL);
 	if (!snp_dev)
@@ -706,7 +709,7 @@ static int __init sev_guest_probe(struct platform_device *pdev)
 e_free_request:
 	free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg));
 e_unmap:
-	iounmap(layout);
+	iounmap(mapping);
 	return ret;
 }
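This is the sparse fix from the list above: ioremap_encrypted() returns a void __iomem * and iounmap() expects that same annotation back, but the old code immediately __force-cast the result into the plain pointer layout and later passed that to iounmap(), tripping sparse's address-space checks. Keeping the annotated pointer in mapping confines the __force cast to the one place where the mapping is legitimately treated as ordinary memory. A generic sketch of the same pattern (struct my_layout, my_map_secrets() and the pa parameter are hypothetical, not from the driver):

#include <linux/io.h>

struct my_layout;	/* hypothetical layout of the mapped page */

/* Keep the __iomem-annotated pointer around so the matching iounmap() stays sparse-clean. */
static struct my_layout *my_map_secrets(phys_addr_t pa, void __iomem **mapping)
{
	*mapping = ioremap(pa, PAGE_SIZE);
	if (!*mapping)
		return NULL;

	/* The __force cast is confined to the single point where the annotation is dropped. */
	return (__force struct my_layout *)*mapping;
}

/* Teardown path: iounmap(*mapping); */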