Commit 16fb9a46 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: nVMX: do early preparation of vmcs02 before check_vmentry_postreqs()

In anticipation of using vmcs02 to do early consistency checks, move
the early preparation of vmcs02 prior to checking the postreqs.  The
downside of this approach is that we'll unnecessarily load vmcs02 in
the case that check_vmentry_postreqs() fails, but that is essentially
our slow path anyway (not actually slow, but it's the path we don't
really care about optimizing).
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent 9d6105b2
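To make the reordering easier to follow, below is a minimal stand-alone C model of the new entry flow. The function names mirror those in arch/x86/kvm/vmx.c, but every body is a stub and the error handling is deliberately simplified; this is an illustrative sketch, not kernel code.

/* Hypothetical stand-alone model of the reordered nested VM-Entry flow.
 * Function names mirror arch/x86/kvm/vmx.c; all bodies are stubs. */
#include <stdbool.h>
#include <stdio.h>

static void vmx_switch_vmcs(void)         { puts("switch to vmcs02"); }
static void prepare_vmcs02_early(void)    { puts("early vmcs02 preparation"); }
static void nested_get_vmcs12_pages(void) { puts("map vmcs12 pages"); }
static bool check_vmentry_postreqs(void)  { return false; /* checks pass */ }
static void enter_guest_mode(void)        { puts("enter guest mode"); }

static int nested_vmx_enter_non_root_mode(bool from_vmentry)
{
	vmx_switch_vmcs();
	prepare_vmcs02_early();     /* now runs before the postreq checks */

	if (from_vmentry) {
		nested_get_vmcs12_pages();
		if (check_vmentry_postreqs())
			return -1;  /* models the vmentry_fail_vmexit path */
	}

	enter_guest_mode();         /* reached only after the checks pass */
	return 0;
}

int main(void)
{
	return nested_vmx_enter_non_root_mode(true) ? 1 : 0;
}

The model captures the tradeoff named in the commit message: vmcs02 is switched to and partially prepared even when check_vmentry_postreqs() later fails, which only costs extra work on the failure path.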
@@ -12701,11 +12701,6 @@ static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
 	if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
 		evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
 
-	if (from_vmentry && check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
-		goto vmentry_fail_vmexit;
-
-	enter_guest_mode(vcpu);
-
 	if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
 		vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
 	if (kvm_mpx_supported() &&
@@ -12714,17 +12709,23 @@ static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
 
 	vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
 
+	prepare_vmcs02_early(vmx, vmcs12);
+
+	if (from_vmentry) {
+		nested_get_vmcs12_pages(vcpu);
+		if (check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
+			goto vmentry_fail_vmexit;
+	}
+
+	enter_guest_mode(vcpu);
+
 	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
 		vcpu->arch.tsc_offset += vmcs12->tsc_offset;
 
-	prepare_vmcs02_early(vmx, vmcs12);
-
 	if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
 		goto vmentry_fail_vmexit_guest_mode;
 
 	if (from_vmentry) {
-		nested_get_vmcs12_pages(vcpu);
-
 		exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
 		exit_qual = nested_vmx_load_msr(vcpu,
 						vmcs12->vm_entry_msr_load_addr,
@@ -12776,12 +12777,13 @@ static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
 	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
 		vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
 	leave_guest_mode(vcpu);
+
+vmentry_fail_vmexit:
 	vmx_switch_vmcs(vcpu, &vmx->vmcs01);
 
 	if (!from_vmentry)
 		return 1;
 
-vmentry_fail_vmexit:
 	load_vmcs12_host_state(vcpu, vmcs12);
 	vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
 	vmcs12->exit_qualification = exit_qual;