提交 f050bf2b 编写于 作者: P Peter Zijlstra 提交者: Zheng Zengkai

x86/kvm: Fix SETcc emulation for return thunks

stable inclusion
from stable-v5.10.133
commit ee4996f07d868ee6cc7e76151dfab9a2344cdeb0
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I5PTAS
CVE: CVE-2022-29900,CVE-2022-23816,CVE-2022-29901

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=ee4996f07d868ee6cc7e76151dfab9a2344cdeb0

--------------------------------

commit af2e140f upstream.

Prepare the SETcc fastop stuff for when RET can be larger still.

The tricky bit here is that the expressions should not only be
constant C expressions, but also absolute GAS expressions. This means
no ?: and 'true' is ~0.

Also ensure em_setcc() has the same alignment as the actual FOP_SETCC()
ops, this ensures there cannot be an alignment hole between em_setcc()
and the first op.

Additionally, add a .skip directive to the FOP_SETCC() macro to fill
any remaining space with INT3 traps; however the primary purpose of
this directive is to generate AS warnings when the remaining space
goes negative. Which is a very good indication the alignment magic
went side-ways.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
[cascardo: ignore ENDBR when computing SETCC_LENGTH]
[cascardo: conflict fixup]
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Lin Yujun <linyujun809@huawei.com>
Reviewed-by: Zhang Jianhua <chris.zjh@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
上级 70819a9a
...@@ -322,13 +322,15 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop); ...@@ -322,13 +322,15 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
#define FOP_RET(name) \ #define FOP_RET(name) \
__FOP_RET(#name) __FOP_RET(#name)
#define FOP_START(op) \ #define __FOP_START(op, align) \
extern void em_##op(struct fastop *fake); \ extern void em_##op(struct fastop *fake); \
asm(".pushsection .text, \"ax\" \n\t" \ asm(".pushsection .text, \"ax\" \n\t" \
".global em_" #op " \n\t" \ ".global em_" #op " \n\t" \
".align " __stringify(FASTOP_SIZE) " \n\t" \ ".align " __stringify(align) " \n\t" \
"em_" #op ":\n\t" "em_" #op ":\n\t"
#define FOP_START(op) __FOP_START(op, FASTOP_SIZE)
#define FOP_END \ #define FOP_END \
".popsection") ".popsection")
...@@ -432,15 +434,14 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop); ...@@ -432,15 +434,14 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
/* /*
* Depending on .config the SETcc functions look like: * Depending on .config the SETcc functions look like:
* *
* SETcc %al [3 bytes] * SETcc %al [3 bytes]
* RET [1 byte] * RET | JMP __x86_return_thunk [1,5 bytes; CONFIG_RETPOLINE]
* INT3 [1 byte; CONFIG_SLS] * INT3 [1 byte; CONFIG_SLS]
*
* Which gives possible sizes 4 or 5. When rounded up to the
* next power-of-two alignment they become 4 or 8.
*/ */
#define SETCC_LENGTH (4 + IS_ENABLED(CONFIG_SLS)) #define RET_LENGTH (1 + (4 * IS_ENABLED(CONFIG_RETPOLINE)) + \
#define SETCC_ALIGN (4 << IS_ENABLED(CONFIG_SLS)) IS_ENABLED(CONFIG_SLS))
#define SETCC_LENGTH (3 + RET_LENGTH)
#define SETCC_ALIGN (4 << ((SETCC_LENGTH > 4) & 1) << ((SETCC_LENGTH > 8) & 1))
static_assert(SETCC_LENGTH <= SETCC_ALIGN); static_assert(SETCC_LENGTH <= SETCC_ALIGN);
#define FOP_SETCC(op) \ #define FOP_SETCC(op) \
...@@ -448,14 +449,15 @@ static_assert(SETCC_LENGTH <= SETCC_ALIGN); ...@@ -448,14 +449,15 @@ static_assert(SETCC_LENGTH <= SETCC_ALIGN);
".type " #op ", @function \n\t" \ ".type " #op ", @function \n\t" \
#op ": \n\t" \ #op ": \n\t" \
#op " %al \n\t" \ #op " %al \n\t" \
__FOP_RET(#op) __FOP_RET(#op) \
".skip " __stringify(SETCC_ALIGN) " - (.-" #op "), 0xcc \n\t"
asm(".pushsection .fixup, \"ax\"\n" asm(".pushsection .fixup, \"ax\"\n"
".global kvm_fastop_exception \n" ".global kvm_fastop_exception \n"
"kvm_fastop_exception: xor %esi, %esi; " ASM_RET "kvm_fastop_exception: xor %esi, %esi; " ASM_RET
".popsection"); ".popsection");
FOP_START(setcc) __FOP_START(setcc, SETCC_ALIGN)
FOP_SETCC(seto) FOP_SETCC(seto)
FOP_SETCC(setno) FOP_SETCC(setno)
FOP_SETCC(setc) FOP_SETCC(setc)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册