Commit 5e0781df authored by Sean Christopherson, committed by Paolo Bonzini

KVM: VMX: Move vCPU-run code to a proper assembly routine

As evidenced by the myriad patches leading up to this moment, using
an inline asm blob for vCPU-run is nothing short of horrific.  It's also
been called "unholy", "an abomination" and likely a whole host of other
names that would violate the Code of Conduct if recorded here and now.

The code is relocated nearly verbatim, e.g. quotes, newlines, tabs and
__stringify need to be dropped, but other than those cosmetic changes
the only functional changes are to add the "call" and replace the final
"jmp" with a "ret".

Note that STACK_FRAME_NON_STANDARD is also dropped from __vmx_vcpu_run().
Suggested-by: Andi Kleen <ak@linux.intel.com>
Suggested-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent 63c73aa0
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#define WORD_SIZE (BITS_PER_LONG / 8)
#define VCPU_RAX __VCPU_REGS_RAX * WORD_SIZE
#define VCPU_RCX __VCPU_REGS_RCX * WORD_SIZE
#define VCPU_RDX __VCPU_REGS_RDX * WORD_SIZE
#define VCPU_RBX __VCPU_REGS_RBX * WORD_SIZE
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP __VCPU_REGS_RBP * WORD_SIZE
#define VCPU_RSI __VCPU_REGS_RSI * WORD_SIZE
#define VCPU_RDI __VCPU_REGS_RDI * WORD_SIZE
#ifdef CONFIG_X86_64
#define VCPU_R8 __VCPU_REGS_R8 * WORD_SIZE
#define VCPU_R9 __VCPU_REGS_R9 * WORD_SIZE
#define VCPU_R10 __VCPU_REGS_R10 * WORD_SIZE
#define VCPU_R11 __VCPU_REGS_R11 * WORD_SIZE
#define VCPU_R12 __VCPU_REGS_R12 * WORD_SIZE
#define VCPU_R13 __VCPU_REGS_R13 * WORD_SIZE
#define VCPU_R14 __VCPU_REGS_R14 * WORD_SIZE
#define VCPU_R15 __VCPU_REGS_R15 * WORD_SIZE
#endif
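
For concreteness, here is a small standalone C sketch, not part of the patch, showing what these byte offsets evaluate to on x86_64. The register indices are an assumption, taken to mirror asm/kvm_vcpu_regs.h (RAX..RDI = 0..7, R8..R15 = 8..15):

	#include <stdio.h>

	/* Assumed indices mirroring asm/kvm_vcpu_regs.h (illustration only). */
	enum { RAX, RCX, RDX, RBX, RSP, RBP, RSI, RDI,
	       R8, R9, R10, R11, R12, R13, R14, R15 };

	#define WORD_SIZE sizeof(unsigned long)	/* 8 on x86_64, 4 on 32-bit */

	int main(void)
	{
		/* e.g. VCPU_RSI = __VCPU_REGS_RSI * WORD_SIZE = 6 * 8 = 48 */
		printf("VCPU_RAX = %zu\n", RAX * WORD_SIZE);
		printf("VCPU_RSI = %zu\n", RSI * WORD_SIZE);
		printf("VCPU_R15 = %zu\n", R15 * WORD_SIZE);
		return 0;
	}

Each VCPU_* value is simply a byte offset into vcpu->arch.regs, so the assembly below can address guest registers with plain displacement addressing such as VCPU_RAX(%_ASM_CX).
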
.text
@@ -55,3 +79,123 @@ ENDPROC(vmx_vmenter)
ENTRY(vmx_vmexit)
ret
ENDPROC(vmx_vmexit)
/**
* ____vmx_vcpu_run - Run a vCPU via a transition to VMX guest mode
* @vmx: struct vcpu_vmx *
* @regs: unsigned long * (to guest registers)
* %RBX: VMCS launched status (non-zero indicates already launched)
*
* Returns:
* %RBX is 0 on VM-Exit, 1 on VM-Fail
*/
ENTRY(____vmx_vcpu_run)
push %_ASM_BP
mov %_ASM_SP, %_ASM_BP
/*
* Save @regs, _ASM_ARG2 may be modified by vmx_update_host_rsp() and
* @regs is needed after VM-Exit to save the guest's register values.
*/
push %_ASM_ARG2
/* Adjust RSP to account for the CALL to vmx_vmenter(). */
lea -WORD_SIZE(%_ASM_SP), %_ASM_ARG2
call vmx_update_host_rsp
/* Load @regs to RCX. */
mov (%_ASM_SP), %_ASM_CX
/* Check if vmlaunch or vmresume is needed */
cmpb $0, %bl
/* Load guest registers. Don't clobber flags. */
mov VCPU_RAX(%_ASM_CX), %_ASM_AX
mov VCPU_RBX(%_ASM_CX), %_ASM_BX
mov VCPU_RDX(%_ASM_CX), %_ASM_DX
mov VCPU_RSI(%_ASM_CX), %_ASM_SI
mov VCPU_RDI(%_ASM_CX), %_ASM_DI
mov VCPU_RBP(%_ASM_CX), %_ASM_BP
#ifdef CONFIG_X86_64
mov VCPU_R8 (%_ASM_CX), %r8
mov VCPU_R9 (%_ASM_CX), %r9
mov VCPU_R10(%_ASM_CX), %r10
mov VCPU_R11(%_ASM_CX), %r11
mov VCPU_R12(%_ASM_CX), %r12
mov VCPU_R13(%_ASM_CX), %r13
mov VCPU_R14(%_ASM_CX), %r14
mov VCPU_R15(%_ASM_CX), %r15
#endif
/* Load guest RCX. This kills the vmx_vcpu pointer! */
mov VCPU_RCX(%_ASM_CX), %_ASM_CX
/* Enter guest mode */
call vmx_vmenter
/* Jump on VM-Fail. */
jbe 2f
/* Temporarily save guest's RCX. */
push %_ASM_CX
/* Reload @regs to RCX. */
mov WORD_SIZE(%_ASM_SP), %_ASM_CX
/* Save all guest registers, including RCX from the stack */
mov %_ASM_AX, VCPU_RAX(%_ASM_CX)
mov %_ASM_BX, VCPU_RBX(%_ASM_CX)
__ASM_SIZE(pop) VCPU_RCX(%_ASM_CX)
mov %_ASM_DX, VCPU_RDX(%_ASM_CX)
mov %_ASM_SI, VCPU_RSI(%_ASM_CX)
mov %_ASM_DI, VCPU_RDI(%_ASM_CX)
mov %_ASM_BP, VCPU_RBP(%_ASM_CX)
#ifdef CONFIG_X86_64
mov %r8, VCPU_R8 (%_ASM_CX)
mov %r9, VCPU_R9 (%_ASM_CX)
mov %r10, VCPU_R10(%_ASM_CX)
mov %r11, VCPU_R11(%_ASM_CX)
mov %r12, VCPU_R12(%_ASM_CX)
mov %r13, VCPU_R13(%_ASM_CX)
mov %r14, VCPU_R14(%_ASM_CX)
mov %r15, VCPU_R15(%_ASM_CX)
#endif
/* Clear EBX to indicate VM-Exit (as opposed to VM-Fail). */
xor %ebx, %ebx
/*
* Clear all general purpose registers except RSP and RBX to prevent
* speculative use of the guest's values, even those that are reloaded
* via the stack. In theory, an L1 cache miss when restoring registers
* could lead to speculative execution with the guest's values.
* Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
* free. RSP and RBX are exempt as RSP is restored by hardware during
* VM-Exit and RBX is explicitly loaded with 0 or 1 to "return" VM-Fail.
*/
1:
#ifdef CONFIG_X86_64
xor %r8d, %r8d
xor %r9d, %r9d
xor %r10d, %r10d
xor %r11d, %r11d
xor %r12d, %r12d
xor %r13d, %r13d
xor %r14d, %r14d
xor %r15d, %r15d
#endif
xor %eax, %eax
xor %ecx, %ecx
xor %edx, %edx
xor %esi, %esi
xor %edi, %edi
xor %ebp, %ebp
/* "POP" @regs. */
add $WORD_SIZE, %_ASM_SP
pop %_ASM_BP
ret
/* VM-Fail. Out-of-line to avoid a taken Jcc after VM-Exit. */
2: mov $1, %ebx
jmp 1b
ENDPROC(____vmx_vcpu_run)
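
To show how the C side now reaches this routine, here is a hedged sketch of the call site (x86_64 only). The "call" line and the output constraints appear in the vmx.c hunk below; the input operands and the clobber list are assumptions, since the diff elides them:

	asm(
		/* %ebx carries the "launched" flag in and VM-Fail back out. */
		"call ____vmx_vcpu_run \n\t"
	      : ASM_CALL_CONSTRAINT, "=b"(vmx->fail),
		/* RDI/RSI are consumed and zeroed by the callee. */
		"=D"((int){0}), "=S"((int){0})
	      : "D"(vmx), "S"(&vcpu->arch.regs),	/* assumed inputs */
		"b"(vmx->loaded_vmcs->launched)		/* assumed input */
	      : "cc", "memory",
		"rax", "rcx", "rdx", "r8", "r9", "r10", "r11",
		"r12", "r13", "r14", "r15");

The entire multi-line inline-asm blob removed further down collapses to this single CALL, which is the point of the patch.
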
@@ -6371,33 +6371,6 @@ void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
}
}
#ifdef CONFIG_X86_64
#define WORD_SIZE 8
#else
#define WORD_SIZE 4
#endif
#define _WORD_SIZE __stringify(WORD_SIZE)
#define VCPU_RAX __stringify(__VCPU_REGS_RAX * WORD_SIZE)
#define VCPU_RCX __stringify(__VCPU_REGS_RCX * WORD_SIZE)
#define VCPU_RDX __stringify(__VCPU_REGS_RDX * WORD_SIZE)
#define VCPU_RBX __stringify(__VCPU_REGS_RBX * WORD_SIZE)
/* Intentionally omit %RSP as it's context switched by hardware */
#define VCPU_RBP __stringify(__VCPU_REGS_RBP * WORD_SIZE)
#define VCPU_RSI __stringify(__VCPU_REGS_RSI * WORD_SIZE)
#define VCPU_RDI __stringify(__VCPU_REGS_RDI * WORD_SIZE)
#ifdef CONFIG_X86_64
#define VCPU_R8 __stringify(__VCPU_REGS_R8 * WORD_SIZE)
#define VCPU_R9 __stringify(__VCPU_REGS_R9 * WORD_SIZE)
#define VCPU_R10 __stringify(__VCPU_REGS_R10 * WORD_SIZE)
#define VCPU_R11 __stringify(__VCPU_REGS_R11 * WORD_SIZE)
#define VCPU_R12 __stringify(__VCPU_REGS_R12 * WORD_SIZE)
#define VCPU_R13 __stringify(__VCPU_REGS_R13 * WORD_SIZE)
#define VCPU_R14 __stringify(__VCPU_REGS_R14 * WORD_SIZE)
#define VCPU_R15 __stringify(__VCPU_REGS_R15 * WORD_SIZE)
#endif
static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
{
if (static_branch_unlikely(&vmx_l1d_should_flush))
@@ -6407,115 +6380,7 @@ static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
write_cr2(vcpu->arch.cr2);
asm(
"push %%" _ASM_BP " \n\t" "call ____vmx_vcpu_run \n\t"
"mov %%" _ASM_SP ", %%" _ASM_BP " \n\t"
/*
* Save @regs, _ASM_ARG2 may be modified by vmx_update_host_rsp() and
* @regs is needed after VM-Exit to save the guest's register values.
*/
"push %%" _ASM_ARG2 " \n\t"
/* Adjust RSP to account for the CALL to vmx_vmenter(). */
"lea -" _WORD_SIZE "(%%" _ASM_SP "), %%" _ASM_ARG2 " \n\t"
"call vmx_update_host_rsp \n\t"
/* Load RCX with @regs. */
"mov (%%" _ASM_SP "), %%" _ASM_CX " \n\t"
/* Check if vmlaunch or vmresume is needed */
"cmpb $0, %%bl \n\t"
/* Load guest registers. Don't clobber flags. */
"mov " VCPU_RAX "(%%" _ASM_CX "), %%" _ASM_AX " \n\t"
"mov " VCPU_RBX "(%%" _ASM_CX "), %%" _ASM_BX " \n\t"
"mov " VCPU_RDX "(%%" _ASM_CX "), %%" _ASM_DX " \n\t"
"mov " VCPU_RSI "(%%" _ASM_CX "), %%" _ASM_SI " \n\t"
"mov " VCPU_RDI "(%%" _ASM_CX "), %%" _ASM_DI " \n\t"
"mov " VCPU_RBP "(%%" _ASM_CX "), %%" _ASM_BP " \n\t"
#ifdef CONFIG_X86_64
"mov " VCPU_R8 "(%%" _ASM_CX "), %%r8 \n\t"
"mov " VCPU_R9 "(%%" _ASM_CX "), %%r9 \n\t"
"mov " VCPU_R10 "(%%" _ASM_CX "), %%r10 \n\t"
"mov " VCPU_R11 "(%%" _ASM_CX "), %%r11 \n\t"
"mov " VCPU_R12 "(%%" _ASM_CX "), %%r12 \n\t"
"mov " VCPU_R13 "(%%" _ASM_CX "), %%r13 \n\t"
"mov " VCPU_R14 "(%%" _ASM_CX "), %%r14 \n\t"
"mov " VCPU_R15 "(%%" _ASM_CX "), %%r15 \n\t"
#endif
/* Load guest RCX. This kills the vmx_vcpu pointer! */
"mov " VCPU_RCX"(%%" _ASM_CX "), %%" _ASM_CX " \n\t"
/* Enter guest mode */
"call vmx_vmenter\n\t"
"jbe 2f \n\t"
/* Temporarily save guest's RCX. */
"push %%" _ASM_CX " \n\t"
/* Reload RCX with @regs. */
"mov " _WORD_SIZE "(%%" _ASM_SP "), %%" _ASM_CX " \n\t"
/* Save all guest registers, including RCX from the stack */
"mov %%" _ASM_AX ", " VCPU_RAX "(%%" _ASM_CX ") \n\t"
"mov %%" _ASM_BX ", " VCPU_RBX "(%%" _ASM_CX ") \n\t"
__ASM_SIZE(pop) " " VCPU_RCX "(%%" _ASM_CX ") \n\t"
"mov %%" _ASM_DX ", " VCPU_RDX "(%%" _ASM_CX ") \n\t"
"mov %%" _ASM_SI ", " VCPU_RSI "(%%" _ASM_CX ") \n\t"
"mov %%" _ASM_DI ", " VCPU_RDI "(%%" _ASM_CX ") \n\t"
"mov %%" _ASM_BP ", " VCPU_RBP "(%%" _ASM_CX ") \n\t"
#ifdef CONFIG_X86_64
"mov %%r8, " VCPU_R8 "(%%" _ASM_CX ") \n\t"
"mov %%r9, " VCPU_R9 "(%%" _ASM_CX ") \n\t"
"mov %%r10, " VCPU_R10 "(%%" _ASM_CX ") \n\t"
"mov %%r11, " VCPU_R11 "(%%" _ASM_CX ") \n\t"
"mov %%r12, " VCPU_R12 "(%%" _ASM_CX ") \n\t"
"mov %%r13, " VCPU_R13 "(%%" _ASM_CX ") \n\t"
"mov %%r14, " VCPU_R14 "(%%" _ASM_CX ") \n\t"
"mov %%r15, " VCPU_R15 "(%%" _ASM_CX ") \n\t"
#endif
/* Clear EBX to indicate VM-Exit (as opposed to VM-Fail). */
"xor %%ebx, %%ebx \n\t"
/*
* Clear all general purpose registers except RSP and RBX to prevent
* speculative use of the guest's values, even those that are reloaded
* via the stack. In theory, an L1 cache miss when restoring registers
* could lead to speculative execution with the guest's values.
* Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
* free. RSP and RBX are exempt as RSP is restored by hardware during
* VM-Exit and RBX is explicitly loaded with 0 or 1 to "return" VM-Fail.
*/
"1: \n\t"
#ifdef CONFIG_X86_64
"xor %%r8d, %%r8d \n\t"
"xor %%r9d, %%r9d \n\t"
"xor %%r10d, %%r10d \n\t"
"xor %%r11d, %%r11d \n\t"
"xor %%r12d, %%r12d \n\t"
"xor %%r13d, %%r13d \n\t"
"xor %%r14d, %%r14d \n\t"
"xor %%r15d, %%r15d \n\t"
#endif
"xor %%eax, %%eax \n\t"
"xor %%ecx, %%ecx \n\t"
"xor %%edx, %%edx \n\t"
"xor %%esi, %%esi \n\t"
"xor %%edi, %%edi \n\t"
"xor %%ebp, %%ebp \n\t"
/* "POP" the vcpu_vmx pointer. */
"add $" _WORD_SIZE ", %%" _ASM_SP " \n\t"
"pop %%" _ASM_BP " \n\t"
"jmp 3f \n\t"
/* VM-Fail. Out-of-line to avoid a taken Jcc after VM-Exit. */
"2: \n\t"
"mov $1, %%ebx \n\t"
"jmp 1b \n\t"
"3: \n\t"
: ASM_CALL_CONSTRAINT, "=b"(vmx->fail),
#ifdef CONFIG_X86_64
"=D"((int){0}), "=S"((int){0}) "=D"((int){0}), "=S"((int){0})
@@ -6536,7 +6401,6 @@ static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
vcpu->arch.cr2 = read_cr2();
}
STACK_FRAME_NON_STANDARD(__vmx_vcpu_run);
static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
{
...