Commit 7f7f1ba3 authored by Paolo Bonzini

KVM: x86: do not load vmcs12 pages while still in SMM

If the vCPU enters system management mode while running a nested guest,
RSM starts processing the vmentry while still in SMM.  In that case,
however, the pages pointed to by the vmcs12 might be incorrectly
loaded from SMRAM.  To avoid this, delay the handling of the pages
until just before the next vmentry.  This is done with a new request
and a new entry in kvm_x86_ops, which we will be able to reuse for
nested VMX state migration.

Extracted from a patch by Jim Mattson and KarimAllah Ahmed.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent fa3899ad
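
Below is a minimal, self-contained sketch of the deferral pattern this patch relies on. The names here (struct vcpu, make_request, check_request) are illustrative stand-ins, not KVM's actual implementation, which uses atomic bitops on vcpu->requests: the RSM path merely sets a per-vCPU request bit, and the expensive work of mapping the vmcs12 pages happens when the bit is consumed just before the next VM-entry, once gpa-to-hpa translation is possible again.

/*
 * Illustrative model only, not kernel code: a request is a bit in a
 * per-vCPU bitmap.  Setting the bit is cheap and safe from any context;
 * the deferred work runs later, right before VM-entry.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define REQ_GET_VMCS12_PAGES	24	/* mirrors KVM_REQ_GET_VMCS12_PAGES */

struct vcpu {
	uint64_t requests;		/* one bit per pending request */
};

/* RSM path: too early to read guest memory, so only record the work. */
static void make_request(struct vcpu *vcpu, int req)
{
	vcpu->requests |= UINT64_C(1) << req;
}

/* VM-entry path: returns true exactly once per request, clearing the
 * bit so the deferred work is not repeated. */
static bool check_request(struct vcpu *vcpu, int req)
{
	if (vcpu->requests & (UINT64_C(1) << req)) {
		vcpu->requests &= ~(UINT64_C(1) << req);
		return true;
	}
	return false;
}

int main(void)
{
	struct vcpu vcpu = { 0 };

	make_request(&vcpu, REQ_GET_VMCS12_PAGES);	/* RSM emulation */
	if (check_request(&vcpu, REQ_GET_VMCS12_PAGES))	/* next VM-entry */
		puts("safe to load vmcs12 pages now");
	return 0;
}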
@@ -75,6 +75,7 @@
 #define KVM_REQ_HV_EXIT			KVM_ARCH_REQ(21)
 #define KVM_REQ_HV_STIMER		KVM_ARCH_REQ(22)
 #define KVM_REQ_LOAD_EOI_EXITMAP	KVM_ARCH_REQ(23)
+#define KVM_REQ_GET_VMCS12_PAGES	KVM_ARCH_REQ(24)
 
 #define CR0_RESERVED_BITS                                               \
 	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
@@ -1085,6 +1086,8 @@ struct kvm_x86_ops {
 
 	void (*setup_mce)(struct kvm_vcpu *vcpu);
 
+	void (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);
+
 	int (*smi_allowed)(struct kvm_vcpu *vcpu);
 	int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
 	int (*pre_leave_smm)(struct kvm_vcpu *vcpu, u64 smbase);
...
@@ -10660,9 +10660,9 @@ static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
 static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
 						 struct vmcs12 *vmcs12);
 
-static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
-				    struct vmcs12 *vmcs12)
+static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 {
+	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct page *page;
 	u64 hpa;
@@ -11774,12 +11774,17 @@ static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 	return 0;
 }
 
-static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu)
+/*
+ * If exit_qual is NULL, this is being called from RSM.
+ * Otherwise it's called from vmlaunch/vmresume.
+ */
+static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-	u32 exit_qual;
-	int r;
+	bool from_vmentry = !!exit_qual;
+	u32 dummy_exit_qual;
+	int r = 0;
 
 	enter_guest_mode(vcpu);
@@ -11793,17 +11798,28 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu)
 		vcpu->arch.tsc_offset += vmcs12->tsc_offset;
 
 	r = EXIT_REASON_INVALID_STATE;
-	if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
+	if (prepare_vmcs02(vcpu, vmcs12, from_vmentry ? exit_qual : &dummy_exit_qual))
 		goto fail;
 
-	nested_get_vmcs12_pages(vcpu, vmcs12);
+	if (from_vmentry) {
+		nested_get_vmcs12_pages(vcpu);
 
-	r = EXIT_REASON_MSR_LOAD_FAIL;
-	exit_qual = nested_vmx_load_msr(vcpu,
-					vmcs12->vm_entry_msr_load_addr,
-					vmcs12->vm_entry_msr_load_count);
-	if (exit_qual)
-		goto fail;
+		r = EXIT_REASON_MSR_LOAD_FAIL;
+		*exit_qual = nested_vmx_load_msr(vcpu,
+						 vmcs12->vm_entry_msr_load_addr,
+						 vmcs12->vm_entry_msr_load_count);
+		if (*exit_qual)
+			goto fail;
+	} else {
+		/*
+		 * The MMU is not initialized to point at the right entities yet and
+		 * "get pages" would need to read data from the guest (i.e. we will
+		 * need to perform gpa to hpa translation).  Request a call
+		 * to nested_get_vmcs12_pages before the next VM-entry.  The MSRs
+		 * have already been set at vmentry time and should not be reset.
+		 */
+		kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
+	}
 
 	/*
 	 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
@@ -11818,8 +11834,7 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu)
 		vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
 	leave_guest_mode(vcpu);
 	vmx_switch_vmcs(vcpu, &vmx->vmcs01);
-	nested_vmx_entry_failure(vcpu, vmcs12, r, exit_qual);
-	return 1;
+	return r;
 }
 
 /*
@@ -11896,10 +11911,11 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	 */
 
 	vmx->nested.nested_run_pending = 1;
-	ret = enter_vmx_non_root_mode(vcpu);
+	ret = enter_vmx_non_root_mode(vcpu, &exit_qual);
 	if (ret) {
+		nested_vmx_entry_failure(vcpu, vmcs12, ret, exit_qual);
 		vmx->nested.nested_run_pending = 0;
-		return ret;
+		return 1;
 	}
 
 	/*
@@ -12985,7 +13001,7 @@ static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
 
 	if (vmx->nested.smm.guest_mode) {
 		vcpu->arch.hflags &= ~HF_SMM_MASK;
-		ret = enter_vmx_non_root_mode(vcpu);
+		ret = enter_vmx_non_root_mode(vcpu, NULL);
 		vcpu->arch.hflags |= HF_SMM_MASK;
 		if (ret)
 			return ret;
@@ -13134,6 +13150,8 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 
 	.setup_mce = vmx_setup_mce,
 
+	.get_vmcs12_pages = nested_get_vmcs12_pages,
+
 	.smi_allowed = vmx_smi_allowed,
 	.pre_enter_smm = vmx_pre_enter_smm,
 	.pre_leave_smm = vmx_pre_leave_smm,
...
@@ -7260,6 +7260,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	bool req_immediate_exit = false;
 
 	if (kvm_request_pending(vcpu)) {
+		if (kvm_check_request(KVM_REQ_GET_VMCS12_PAGES, vcpu))
+			kvm_x86_ops->get_vmcs12_pages(vcpu);
 		if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
 			kvm_mmu_unload(vcpu);
 		if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
...