Commit 113add96 authored by Konrad Rzeszutek Wilk, committed by Zheng Zengkai

x86/kexec: Disable RET on kexec

stable inclusion
from stable-v5.10.133
commit c2ca992144281917cfae19d231b1195c02906a4e
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I5PTAS
CVE: CVE-2022-29900,CVE-2022-23816,CVE-2022-29901

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=c2ca992144281917cfae19d231b1195c02906a4e

--------------------------------

commit 697977d8 upstream.

All the RET invocations unroll to a jump to __x86_return_thunk, and this
file must be position-independent (PIC), so it cannot reference the thunk.

This fixes kexec on 64-bit AMD boxes.
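
[ Editor's note: for context, with the return-thunk mitigation enabled the
  assembler macro RET no longer emits a bare ret; it emits a relative jump
  to __x86_return_thunk, which lives in the regular kernel text. The kexec
  control code is copied to a scratch page and executed there, so that
  relative jump would land at an arbitrary address. A rough paraphrase of
  the macro from arch/x86/include/asm/linkage.h in this kernel generation,
  not a verbatim quote:

	/* Paraphrased: how RET expands for assembly files here.
	 * With CONFIG_RETHUNK, every RET is a relative jmp to the
	 * return thunk -- fatal once this code has been copied away
	 * from the kernel image. */
	#ifdef CONFIG_RETHUNK
	#define RET	jmp __x86_return_thunk
	#else
	#ifdef CONFIG_SLS
	#define RET	ret; int3
	#else
	#define RET	ret
	#endif
	#endif
]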

  [ bp: Fix 32-bit build. ]
Reported-by: Edward Tran <edward.tran@oracle.com>
Reported-by: Awais Tanveer <awais.tanveer@oracle.com>
Suggested-by: Ankur Arora <ankur.a.arora@oracle.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Lin Yujun <linyujun809@huawei.com>
Reviewed-by: Zhang Jianhua <chris.zjh@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent a4e10f56
--- a/arch/x86/kernel/relocate_kernel_32.S
+++ b/arch/x86/kernel/relocate_kernel_32.S
@@ -7,10 +7,12 @@
 #include <linux/linkage.h>
 #include <asm/page_types.h>
 #include <asm/kexec.h>
+#include <asm/nospec-branch.h>
 #include <asm/processor-flags.h>
 
 /*
- * Must be relocatable PIC code callable as a C function
+ * Must be relocatable PIC code callable as a C function, in particular
+ * there must be a plain RET and not jump to return thunk.
  */
 
 #define PTR(x) (x << 2)
@@ -91,7 +93,9 @@ SYM_CODE_START_NOALIGN(relocate_kernel)
 	movl	%edi, %eax
 	addl	$(identity_mapped - relocate_kernel), %eax
 	pushl	%eax
-	RET
+	ANNOTATE_UNRET_SAFE
+	ret
+	int3
 SYM_CODE_END(relocate_kernel)
 
 SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
@@ -159,12 +163,15 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
 	xorl	%edx, %edx
 	xorl	%esi, %esi
 	xorl	%ebp, %ebp
-	RET
+	ANNOTATE_UNRET_SAFE
+	ret
+	int3
 1:
 	popl	%edx
 	movl	CP_PA_SWAP_PAGE(%edi), %esp
 	addl	$PAGE_SIZE, %esp
 2:
+	ANNOTATE_RETPOLINE_SAFE
 	call	*%edx
 
 	/* get the re-entry point of the peer system */
@@ -190,7 +197,9 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
 	movl	%edi, %eax
 	addl	$(virtual_mapped - relocate_kernel), %eax
 	pushl	%eax
-	RET
+	ANNOTATE_UNRET_SAFE
+	ret
+	int3
 SYM_CODE_END(identity_mapped)
 
 SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
@@ -208,7 +217,9 @@ SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
 	popl	%edi
 	popl	%esi
 	popl	%ebx
-	RET
+	ANNOTATE_UNRET_SAFE
+	ret
+	int3
 SYM_CODE_END(virtual_mapped)
 
 	/* Do the copies */
@@ -271,7 +282,9 @@ SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
 	popl	%edi
 	popl	%ebx
 	popl	%ebp
-	RET
+	ANNOTATE_UNRET_SAFE
+	ret
+	int3
 SYM_CODE_END(swap_pages)
 
 	.globl kexec_control_code_size
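
[ Editor's note on the 32-bit hunks above: besides the RET replacements,
  the patch adds ANNOTATE_RETPOLINE_SAFE ahead of the indirect call into
  the freshly copied image. Under CONFIG_RETPOLINE, objtool flags bare
  indirect calls in assembly; the annotation marks this one as deliberately
  not retpolined, since a retpoline would bounce through a thunk in the old
  image while the control page must stay self-contained. A minimal sketch
  of the idiom, illustrative rather than a verbatim excerpt:

	/* Indirect call into the copied kernel image. Marked safe so
	 * objtool's retpoline validation accepts the plain call. */
	ANNOTATE_RETPOLINE_SAFE
	call	*%edx
]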
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -13,7 +13,8 @@
 #include <asm/unwind_hints.h>
 
 /*
- * Must be relocatable PIC code callable as a C function
+ * Must be relocatable PIC code callable as a C function, in particular
+ * there must be a plain RET and not jump to return thunk.
  */
 
 #define PTR(x) (x << 3)
@@ -104,7 +105,9 @@ SYM_CODE_START_NOALIGN(relocate_kernel)
 	/* jump to identity mapped page */
 	addq	$(identity_mapped - relocate_kernel), %r8
 	pushq	%r8
-	RET
+	ANNOTATE_UNRET_SAFE
+	ret
+	int3
 SYM_CODE_END(relocate_kernel)
 
 SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
@@ -191,7 +194,9 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
 	xorl	%r14d, %r14d
 	xorl	%r15d, %r15d
 
-	RET
+	ANNOTATE_UNRET_SAFE
+	ret
+	int3
 1:
 	popq	%rdx
@@ -210,7 +215,9 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
 	call	swap_pages
 	movq	$virtual_mapped, %rax
 	pushq	%rax
-	RET
+	ANNOTATE_UNRET_SAFE
+	ret
+	int3
 SYM_CODE_END(identity_mapped)
 
 SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
@@ -231,7 +238,9 @@ SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
 	popq	%r12
 	popq	%rbp
 	popq	%rbx
-	RET
+	ANNOTATE_UNRET_SAFE
+	ret
+	int3
 SYM_CODE_END(virtual_mapped)
 
 	/* Do the copies */
@@ -288,7 +297,9 @@ SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
 	lea	PAGE_SIZE(%rax), %rsi
 	jmp	0b
 3:
-	RET
+	ANNOTATE_UNRET_SAFE
+	ret
+	int3
 SYM_CODE_END(swap_pages)
 
 	.globl kexec_control_code_size
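
[ Editor's note on the replacement pattern used throughout both files:
  ANNOTATE_UNRET_SAFE, from <asm/nospec-branch.h>, emits a marker telling
  objtool that this bare ret is intentional, so its return-thunk validation
  does not flag it; the int3 behind it is a speculation trap, so
  straight-line speculation past the ret hits a breakpoint instead of
  executing whatever bytes follow. A commented sketch of the idiom,
  illustrative only:

	ANNOTATE_UNRET_SAFE	/* objtool: this naked ret is deliberate   */
	ret			/* plain near return; no thunk, stays PIC  */
	int3			/* stop straight-line speculation past ret */
]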