diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index a3d9a8e062f960376cd5f84b84a121905d2eaa4f..e06a3f33311efdb737487ad046da868a26037892 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -87,7 +87,7 @@ ENDPROC(vmx_vmexit)
  * @launched:	%true if the VMCS has been launched
  *
  * Returns:
- *	%RBX is 0 on VM-Exit, 1 on VM-Fail
+ *	0 on VM-Exit, 1 on VM-Fail
  */
 ENTRY(__vmx_vcpu_run)
 	push %_ASM_BP
@@ -163,17 +163,17 @@ ENTRY(__vmx_vcpu_run)
 	mov %r15, VCPU_R15(%_ASM_AX)
 #endif
 
-	/* Clear EBX to indicate VM-Exit (as opposed to VM-Fail). */
-	xor %ebx, %ebx
+	/* Clear RAX to indicate VM-Exit (as opposed to VM-Fail). */
+	xor %eax, %eax
 
 	/*
-	 * Clear all general purpose registers except RSP and RBX to prevent
+	 * Clear all general purpose registers except RSP and RAX to prevent
 	 * speculative use of the guest's values, even those that are reloaded
 	 * via the stack. In theory, an L1 cache miss when restoring registers
 	 * could lead to speculative execution with the guest's values.
 	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
-	 * free. RSP and RBX are exempt as RSP is restored by hardware during
-	 * VM-Exit and RBX is explicitly loaded with 0 or 1 to "return" VM-Fail.
+	 * free. RSP and RAX are exempt as RSP is restored by hardware during
+	 * VM-Exit and RAX is explicitly loaded with 0 or 1 to return VM-Fail.
 	 */
 1:
 #ifdef CONFIG_X86_64
@@ -186,7 +186,7 @@ ENTRY(__vmx_vcpu_run)
 	xor %r14d, %r14d
 	xor %r15d, %r15d
 #endif
-	xor %eax, %eax
+	xor %ebx, %ebx
 	xor %ecx, %ecx
 	xor %edx, %edx
 	xor %esi, %esi
@@ -199,6 +199,6 @@ ENTRY(__vmx_vcpu_run)
 	ret
 
 	/* VM-Fail. Out-of-line to avoid a taken Jcc after VM-Exit. */
-2:	mov $1, %ebx
+2:	mov $1, %eax
 	jmp 1b
 ENDPROC(__vmx_vcpu_run)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 1b73a82444a2b32d2761a359657af9bf2dd50fc9..9a1d27e77684ecf3a093d05eb407dd8d566e864f 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6446,20 +6446,20 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	asm(
 		"call __vmx_vcpu_run \n\t"
-	      : ASM_CALL_CONSTRAINT, "=b"(vmx->fail),
+	      : ASM_CALL_CONSTRAINT, "=a"(vmx->fail),
 #ifdef CONFIG_X86_64
 		"=D"((int){0}), "=S"((int){0}), "=d"((int){0})
 	      : "D"(vmx), "S"(&vcpu->arch.regs), "d"(vmx->loaded_vmcs->launched)
 #else
-		"=a"((int){0}), "=d"((int){0}), "=c"((int){0})
+		"=d"((int){0}), "=c"((int){0})
 	      : "a"(vmx), "d"(&vcpu->arch.regs), "c"(vmx->loaded_vmcs->launched)
 #endif
 	      : "cc", "memory"
 #ifdef CONFIG_X86_64
-	      , "rax", "rcx"
+	      , "rbx", "rcx"
 	      , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
 #else
-	      , "edi", "esi"
+	      , "ebx", "edi", "esi"
 #endif
 	);
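
For reference, the constraint change in vmx.c is the standard extended-asm pattern for a status value returned in EAX: the "=a" output binds vmx->fail to the ABI's return register, and RBX drops to the clobber list because __vmx_vcpu_run now only zeroes it. This is also why the 32-bit branch loses its dummy "=a"((int){0}) output, as EAX is now covered by the real output. Below is a minimal userspace sketch of the same pattern (assumed x86-64 with GCC/Clang extended asm; fake_vcpu_run and simulate_fail are illustrative names, not kernel symbols):

/*
 * Minimal sketch of the constraint pattern above (assumed x86-64,
 * GCC/Clang extended asm; not kernel code). The status value is
 * produced in EAX and bound with "=a", mirroring "=a"(vmx->fail);
 * RBX is merely scrubbed by the asm body, so it sits in the clobber
 * list instead of the outputs.
 */
#include <stdio.h>

static int fake_vcpu_run(int simulate_fail)
{
	int fail;

	asm("test %[vmfail], %[vmfail] \n\t"  /* read the input first */
	    "mov  $0, %%eax            \n\t"  /* assume VM-Exit; mov leaves flags intact */
	    "jz   1f                   \n\t"
	    "mov  $1, %%eax            \n\t"  /* "VM-Fail" path */
	    "1: xor %%ebx, %%ebx       \n\t"  /* scrub RBX, as __vmx_vcpu_run now does */
	    : "=a"(fail)                      /* result lands in EAX, per the ABI */
	    : [vmfail] "r"(simulate_fail)
	    : "rbx", "cc");                   /* RBX is a plain clobber, not an output */

	return fail;
}

int main(void)
{
	printf("simulate_fail=0 -> %d\n", fake_vcpu_run(0));  /* expect 0 */
	printf("simulate_fail=1 -> %d\n", fake_vcpu_run(1));  /* expect 1 */
	return 0;
}

Compiled with something like gcc -O2 sketch.c, this prints 0 and 1 respectively: the caller picks the result up from EAX exactly as vmx_vcpu_run() now picks up vmx->fail.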