提交 42c692ae 编写于 作者: Peter Zijlstra 提交者: Zheng Zengkai

x86/bugs: Optimize SPEC_CTRL MSR writes

stable inclusion
from stable-v5.10.133
commit 6d7e13ccc4d73e5c88cc015bc0154b7d08f65038
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I5PTAS
CVE: CVE-2022-29900,CVE-2022-23816,CVE-2022-29901

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=6d7e13ccc4d73e5c88cc015bc0154b7d08f65038

--------------------------------

commit c779bc1a upstream.

When changing SPEC_CTRL for user control, the WRMSR can be delayed
until return-to-user when KERNEL_IBRS has been enabled.

This avoids an MSR write during context switch.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Lin Yujun <linyujun809@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
上级 2b6e030f
...@@ -261,7 +261,7 @@ static inline void indirect_branch_prediction_barrier(void) ...@@ -261,7 +261,7 @@ static inline void indirect_branch_prediction_barrier(void)
/* The Intel SPEC CTRL MSR base value cache */ /* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base; extern u64 x86_spec_ctrl_base;
extern void write_spec_ctrl_current(u64 val); extern void write_spec_ctrl_current(u64 val, bool force);
/* /*
* With retpoline, we must use IBRS to restrict branch prediction * With retpoline, we must use IBRS to restrict branch prediction
......
...@@ -62,13 +62,19 @@ static DEFINE_MUTEX(spec_ctrl_mutex); ...@@ -62,13 +62,19 @@ static DEFINE_MUTEX(spec_ctrl_mutex);
* Keep track of the SPEC_CTRL MSR value for the current task, which may differ * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
* from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update(). * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
*/ */
void write_spec_ctrl_current(u64 val) void write_spec_ctrl_current(u64 val, bool force)
{ {
if (this_cpu_read(x86_spec_ctrl_current) == val) if (this_cpu_read(x86_spec_ctrl_current) == val)
return; return;
this_cpu_write(x86_spec_ctrl_current, val); this_cpu_write(x86_spec_ctrl_current, val);
wrmsrl(MSR_IA32_SPEC_CTRL, val);
/*
* When KERNEL_IBRS this MSR is written on return-to-user, unless
* forced the update can be delayed until that time.
*/
if (force || !cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
wrmsrl(MSR_IA32_SPEC_CTRL, val);
} }
/* /*
...@@ -1253,7 +1259,7 @@ static void __init spectre_v2_select_mitigation(void) ...@@ -1253,7 +1259,7 @@ static void __init spectre_v2_select_mitigation(void)
if (spectre_v2_in_eibrs_mode(mode)) { if (spectre_v2_in_eibrs_mode(mode)) {
/* Force it so VMEXIT will restore correctly */ /* Force it so VMEXIT will restore correctly */
x86_spec_ctrl_base |= SPEC_CTRL_IBRS; x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
write_spec_ctrl_current(x86_spec_ctrl_base); write_spec_ctrl_current(x86_spec_ctrl_base, true);
} }
switch (mode) { switch (mode) {
...@@ -1308,7 +1314,7 @@ static void __init spectre_v2_select_mitigation(void) ...@@ -1308,7 +1314,7 @@ static void __init spectre_v2_select_mitigation(void)
static void update_stibp_msr(void * __unused) static void update_stibp_msr(void * __unused)
{ {
write_spec_ctrl_current(x86_spec_ctrl_base); write_spec_ctrl_current(x86_spec_ctrl_base, true);
} }
/* Update x86_spec_ctrl_base in case SMT state changed. */ /* Update x86_spec_ctrl_base in case SMT state changed. */
...@@ -1551,7 +1557,7 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void) ...@@ -1551,7 +1557,7 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
x86_amd_ssb_disable(); x86_amd_ssb_disable();
} else { } else {
x86_spec_ctrl_base |= SPEC_CTRL_SSBD; x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
write_spec_ctrl_current(x86_spec_ctrl_base); write_spec_ctrl_current(x86_spec_ctrl_base, true);
} }
} }
...@@ -1769,7 +1775,7 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) ...@@ -1769,7 +1775,7 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
void x86_spec_ctrl_setup_ap(void) void x86_spec_ctrl_setup_ap(void)
{ {
if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
write_spec_ctrl_current(x86_spec_ctrl_base); write_spec_ctrl_current(x86_spec_ctrl_base, true);
if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
x86_amd_ssb_disable(); x86_amd_ssb_disable();
......
...@@ -557,7 +557,7 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp, ...@@ -557,7 +557,7 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
} }
if (updmsr) if (updmsr)
write_spec_ctrl_current(msr); write_spec_ctrl_current(msr, false);
} }
static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk) static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册