Commit 61724151 authored by Uros Bizjak, committed by Zheng Zengkai

KVM/nVMX: Use __vmx_vcpu_run in nested_vmx_check_vmentry_hw

stable inclusion
from stable-v5.10.133
commit dd87aa5f610be44f195cf5a99b7bc153faf30a3d
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I5PTAS
CVE: CVE-2022-29900, CVE-2022-23816, CVE-2022-29901

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=dd87aa5f610be44f195cf5a99b7bc153faf30a3d

--------------------------------

commit 150f17bf upstream.

Replace inline assembly in nested_vmx_check_vmentry_hw
with a call to __vmx_vcpu_run.  The function is not
performance critical, so (double) GPR save/restore
in __vmx_vcpu_run can be tolerated, as far as performance
effects are concerned.
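
As an illustration of the pattern (not kernel code; every name below is
a stand-in), here is a minimal self-contained C sketch: a status-flag-
capturing asm statement is replaced by a call to a helper whose boolean
return value directly reports VM-Fail, the way CC_SET(be)/CC_OUT(be)
used to fill vm_fail in the old inline assembly.

    #include <stdbool.h>
    #include <stdio.h>

    struct loaded_vmcs_stub {
            bool launched;          /* has a VMLAUNCH already succeeded? */
    };

    /* Stand-in for __vmx_vcpu_run(): returns true on VM-Fail, i.e. the
     * "below or equal" (CF or ZF set) condition the old asm captured. */
    static bool vcpu_run_stub(struct loaded_vmcs_stub *vmcs,
                              unsigned long *regs)
    {
            (void)regs;
            vmcs->launched = true;  /* pretend VM-Enter succeeded */
            return false;           /* false == no VM-Fail */
    }

    int main(void)
    {
            struct loaded_vmcs_stub vmcs = { .launched = false };
            unsigned long regs[16] = { 0 };

            /* Old: asm(...) + CC_SET(be) into vm_fail.
             * New: the helper's return value *is* vm_fail. */
            bool vm_fail = vcpu_run_stub(&vmcs, regs);

            printf("vm_fail=%d, launched=%d\n", vm_fail, vmcs.launched);
            return 0;
    }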

Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Sean Christopherson <seanjc@google.com>
Reviewed-and-tested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
[sean: dropped versioning info from changelog]
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20201231002702.22237077-5-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

conflict:
	arch/x86/kvm/vmx/vmx.h
Signed-off-by: Lin Yujun <linyujun809@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 1552a45e
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
@@ -12,6 +12,7 @@
 #include "nested.h"
 #include "pmu.h"
 #include "trace.h"
+#include "vmx.h"
 #include "x86.h"

 static bool __read_mostly enable_shadow_vmcs = 1;
@@ -3075,35 +3076,8 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
 		vmx->loaded_vmcs->host_state.cr4 = cr4;
 	}

-	asm(
-		"sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */
-		"cmp %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
-		"je 1f \n\t"
-		__ex("vmwrite %%" _ASM_SP ", %[HOST_RSP]") "\n\t"
-		"mov %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
-		"1: \n\t"
-		"add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */
-
-		/* Check if vmlaunch or vmresume is needed */
-		"cmpb $0, %c[launched](%[loaded_vmcs])\n\t"
-
-		/*
-		 * VMLAUNCH and VMRESUME clear RFLAGS.{CF,ZF} on VM-Exit, set
-		 * RFLAGS.CF on VM-Fail Invalid and set RFLAGS.ZF on VM-Fail
-		 * Valid.  vmx_vmenter() directly "returns" RFLAGS, and so the
-		 * results of VM-Enter is captured via CC_{SET,OUT} to vm_fail.
-		 */
-		"call vmx_vmenter\n\t"
-		CC_SET(be)
-	      : ASM_CALL_CONSTRAINT, CC_OUT(be) (vm_fail)
-	      : [HOST_RSP]"r"((unsigned long)HOST_RSP),
-		[loaded_vmcs]"r"(vmx->loaded_vmcs),
-		[launched]"i"(offsetof(struct loaded_vmcs, launched)),
-		[host_state_rsp]"i"(offsetof(struct loaded_vmcs, host_state.rsp)),
-		[wordsize]"i"(sizeof(ulong))
-	      : "memory"
-	);
+	vm_fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
+				 vmx->loaded_vmcs->launched);

 	if (vmx->msr_autoload.host.nr)
 		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
@@ -44,7 +44,7 @@
  * they VM-Fail, whereas a successful VM-Enter + VM-Exit will jump
  * to vmx_vmexit.
  */
-SYM_FUNC_START(vmx_vmenter)
+SYM_FUNC_START_LOCAL(vmx_vmenter)
 	/* EFLAGS.ZF is set if VMCS.LAUNCHED == 0 */
 	je 2f
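The vmenter.S hunk above works because, once the inline asm in nested.c
is gone, nothing outside vmenter.S references vmx_vmenter any more:
SYM_FUNC_START_LOCAL gives the symbol file-local linkage, the assembly
analogue of marking a C function static when all of its callers live in
the same translation unit. A tiny analogy sketch (hypothetical names):

    #include <stdio.h>

    /* Was extern (callable from other files); now static, because the
     * only remaining caller sits in this same file -- the C analogue of
     * SYM_FUNC_START -> SYM_FUNC_START_LOCAL. */
    static long vmenter_analogy(long launched)
    {
            return launched ? 2 : 1;  /* stand-in: VMRESUME vs VMLAUNCH */
    }

    int main(void)
    {
            printf("%ld\n", vmenter_analogy(0));
            return 0;
    }
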
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
@@ -6744,8 +6744,6 @@ static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
 	}
 }

-bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
-
 static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
 					struct vcpu_vmx *vmx)
 {
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
@@ -391,6 +391,7 @@ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
 struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr);
 void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu);
 void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
+bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
 int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
 void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);
 void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu,