提交 3a1d63be 编写于 作者: Josh Poimboeuf 提交者: Zheng Zengkai

x86/speculation: Fix RSB filling with CONFIG_RETPOLINE=n

stable inclusion
from stable-v5.10.133
commit f1b01ace814b0a8318041e3aea5fd36cc74f09b0
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I5PTAS
CVE: CVE-2022-29900,CVE-2022-23816,CVE-2022-29901

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=f1b01ace814b0a8318041e3aea5fd36cc74f09b0

--------------------------------

commit b2620fac upstream.

If a kernel is built with CONFIG_RETPOLINE=n, but the user still wants
to mitigate Spectre v2 using IBRS or eIBRS, the RSB filling will be
silently disabled.

There's nothing retpoline-specific about RSB buffer filling.  Remove the
CONFIG_RETPOLINE guards around it.
Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Lin Yujun <linyujun809@huawei.com>
Reviewed-by: Zhang Jianhua <chris.zjh@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
上级 878d71c7
@@ -782,7 +782,6 @@ SYM_CODE_START(__switch_to_asm)
 	movl	%ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
 #endif

-#ifdef CONFIG_RETPOLINE
 	/*
 	 * When switching from a shallower to a deeper call stack
 	 * the RSB may either underflow or use entries populated
@@ -791,7 +790,6 @@ SYM_CODE_START(__switch_to_asm)
 	 * speculative execution to prevent attack.
 	 */
 	FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
-#endif

 	/* Restore flags or the incoming task to restore AC state. */
 	popfl
...
@@ -249,7 +249,6 @@ SYM_FUNC_START(__switch_to_asm)
 	movq	%rbx, PER_CPU_VAR(fixed_percpu_data) + stack_canary_offset
 #endif

-#ifdef CONFIG_RETPOLINE
 	/*
 	 * When switching from a shallower to a deeper call stack
 	 * the RSB may either underflow or use entries populated
@@ -258,7 +257,6 @@ SYM_FUNC_START(__switch_to_asm)
 	 * speculative execution to prevent attack.
 	 */
 	FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
-#endif

 	/* restore callee-saved registers */
 	popq	%r15
...
@@ -122,11 +122,9 @@
  * monstrosity above, manually.
  */
 .macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
-#ifdef CONFIG_RETPOLINE
 	ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr
 	__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)
 .Lskip_rsb_\@:
-#endif
 .endm

 /*
...
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册