Commit 09abb5e3 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: nVMX: call kvm_skip_emulated_instruction in nested_vmx_{fail,succeed}

... as every invocation of nested_vmx_{fail,succeed} is immediately
followed by a call to kvm_skip_emulated_instruction().  This saves
a bit of code and eliminates some silly paths, e.g. nested_vmx_run()
ended up with a goto label purely used to call and return
kvm_skip_emulated_instruction().
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent c37a6116
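
For illustration, here is the before/after shape of a typical VMX instruction handler once the helpers skip the instruction themselves. This is a trimmed excerpt of handle_vmclear() from the diff below, not a complete function; the elided body is marked with "...".

        /* Before: each failure or success path had to skip explicitly. */
        if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
                nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
                return kvm_skip_emulated_instruction(vcpu);
        }
        ...
        nested_vmx_succeed(vcpu);
        return kvm_skip_emulated_instruction(vcpu);

        /* After: the helpers return kvm_skip_emulated_instruction()'s result,
         * so the handler can fail/succeed and skip in a single statement.
         */
        if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
                return nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
        ...
        return nested_vmx_succeed(vcpu);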
@@ -8060,35 +8060,37 @@ static int handle_monitor(struct kvm_vcpu *vcpu)
 /*
  * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
- * set the success or error code of an emulated VMX instruction, as specified
- * by Vol 2B, VMX Instruction Reference, "Conventions".
+ * set the success or error code of an emulated VMX instruction (as specified
+ * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated
+ * instruction.
  */
-static void nested_vmx_succeed(struct kvm_vcpu *vcpu)
+static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
 {
         vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
                         & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
                             X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
+        return kvm_skip_emulated_instruction(vcpu);
 }
 
-static void nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
+static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
 {
         vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
                         & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
                             X86_EFLAGS_SF | X86_EFLAGS_OF))
                         | X86_EFLAGS_CF);
+        return kvm_skip_emulated_instruction(vcpu);
 }
 
-static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
+static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
                                  u32 vm_instruction_error)
 {
-        if (to_vmx(vcpu)->nested.current_vmptr == -1ull) {
-                /*
-                 * failValid writes the error number to the current VMCS, which
-                 * can't be done there isn't a current VMCS.
-                 */
-                nested_vmx_failInvalid(vcpu);
-                return;
-        }
+        /*
+         * failValid writes the error number to the current VMCS, which
+         * can't be done if there isn't a current VMCS.
+         */
+        if (to_vmx(vcpu)->nested.current_vmptr == -1ull)
+                return nested_vmx_failInvalid(vcpu);
+
         vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
                         & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
                             X86_EFLAGS_SF | X86_EFLAGS_OF))
@@ -8098,6 +8100,7 @@ static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
          * We don't need to force a shadow sync because
          * VM_INSTRUCTION_ERROR is not shadowed
          */
+        return kvm_skip_emulated_instruction(vcpu);
 }
 
 static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
@@ -8339,10 +8342,9 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
                 return 1;
         }
 
-        if (vmx->nested.vmxon) {
-                nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
-                return kvm_skip_emulated_instruction(vcpu);
-        }
+        if (vmx->nested.vmxon)
+                return nested_vmx_failValid(vcpu,
+                        VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
 
         if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
                         != VMXON_NEEDED_FEATURES) {
@@ -8361,21 +8363,17 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
          * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case;
          * which replaces physical address width with 32
          */
-        if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
-                nested_vmx_failInvalid(vcpu);
-                return kvm_skip_emulated_instruction(vcpu);
-        }
+        if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
+                return nested_vmx_failInvalid(vcpu);
 
         page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
-        if (is_error_page(page)) {
-                nested_vmx_failInvalid(vcpu);
-                return kvm_skip_emulated_instruction(vcpu);
-        }
+        if (is_error_page(page))
+                return nested_vmx_failInvalid(vcpu);
+
         if (*(u32 *)kmap(page) != VMCS12_REVISION) {
                 kunmap(page);
                 kvm_release_page_clean(page);
-                nested_vmx_failInvalid(vcpu);
-                return kvm_skip_emulated_instruction(vcpu);
+                return nested_vmx_failInvalid(vcpu);
         }
         kunmap(page);
         kvm_release_page_clean(page);
@@ -8385,8 +8383,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
         if (ret)
                 return ret;
 
-        nested_vmx_succeed(vcpu);
-        return kvm_skip_emulated_instruction(vcpu);
+        return nested_vmx_succeed(vcpu);
 }
 
 /*
@@ -8486,8 +8483,7 @@ static int handle_vmoff(struct kvm_vcpu *vcpu)
         if (!nested_vmx_check_permission(vcpu))
                 return 1;
         free_nested(to_vmx(vcpu));
-        nested_vmx_succeed(vcpu);
-        return kvm_skip_emulated_instruction(vcpu);
+        return nested_vmx_succeed(vcpu);
 }
 
 /* Emulate the VMCLEAR instruction */
@@ -8503,15 +8499,13 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
         if (nested_vmx_get_vmptr(vcpu, &vmptr))
                 return 1;
 
-        if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
-                nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
-                return kvm_skip_emulated_instruction(vcpu);
-        }
+        if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
+                return nested_vmx_failValid(vcpu,
+                        VMXERR_VMCLEAR_INVALID_ADDRESS);
 
-        if (vmptr == vmx->nested.vmxon_ptr) {
-                nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_VMXON_POINTER);
-                return kvm_skip_emulated_instruction(vcpu);
-        }
+        if (vmptr == vmx->nested.vmxon_ptr)
+                return nested_vmx_failValid(vcpu,
+                        VMXERR_VMCLEAR_VMXON_POINTER);
 
         if (vmptr == vmx->nested.current_vmptr)
                 nested_release_vmcs12(vmx);
@@ -8520,8 +8514,7 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
                            vmptr + offsetof(struct vmcs12, launch_state),
                            &zero, sizeof(zero));
 
-        nested_vmx_succeed(vcpu);
-        return kvm_skip_emulated_instruction(vcpu);
+        return nested_vmx_succeed(vcpu);
 }
 
 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);
@@ -8677,20 +8670,6 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
         vmcs_load(vmx->loaded_vmcs->vmcs);
 }
 
-/*
- * VMX instructions which assume a current vmcs12 (i.e., that VMPTRLD was
- * used before) all generate the same failure when it is missing.
- */
-static int nested_vmx_check_vmcs12(struct kvm_vcpu *vcpu)
-{
-        struct vcpu_vmx *vmx = to_vmx(vcpu);
-
-        if (vmx->nested.current_vmptr == -1ull) {
-                nested_vmx_failInvalid(vcpu);
-                return 0;
-        }
-        return 1;
-}
-
 static int handle_vmread(struct kvm_vcpu *vcpu)
 {
         unsigned long field;
@@ -8703,8 +8682,8 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
         if (!nested_vmx_check_permission(vcpu))
                 return 1;
 
-        if (!nested_vmx_check_vmcs12(vcpu))
-                return kvm_skip_emulated_instruction(vcpu);
+        if (to_vmx(vcpu)->nested.current_vmptr == -1ull)
+                return nested_vmx_failInvalid(vcpu);
 
         if (!is_guest_mode(vcpu))
                 vmcs12 = get_vmcs12(vcpu);
@@ -8713,20 +8692,18 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
                  * When vmcs->vmcs_link_pointer is -1ull, any VMREAD
                  * to shadowed-field sets the ALU flags for VMfailInvalid.
                  */
-                if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull) {
-                        nested_vmx_failInvalid(vcpu);
-                        return kvm_skip_emulated_instruction(vcpu);
-                }
+                if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
+                        return nested_vmx_failInvalid(vcpu);
                 vmcs12 = get_shadow_vmcs12(vcpu);
         }
 
         /* Decode instruction info and find the field to read */
         field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
         /* Read the field, zero-extended to a u64 field_value */
-        if (vmcs12_read_any(vmcs12, field, &field_value) < 0) {
-                nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
-                return kvm_skip_emulated_instruction(vcpu);
-        }
+        if (vmcs12_read_any(vmcs12, field, &field_value) < 0)
+                return nested_vmx_failValid(vcpu,
+                        VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+
         /*
          * Now copy part of this value to register or memory, as requested.
          * Note that the number of bits actually copied is 32 or 64 depending
@@ -8744,8 +8721,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
                                            (is_long_mode(vcpu) ? 8 : 4), NULL);
         }
 
-        nested_vmx_succeed(vcpu);
-        return kvm_skip_emulated_instruction(vcpu);
+        return nested_vmx_succeed(vcpu);
 }
 
@@ -8770,8 +8746,8 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
         if (!nested_vmx_check_permission(vcpu))
                 return 1;
 
-        if (!nested_vmx_check_vmcs12(vcpu))
-                return kvm_skip_emulated_instruction(vcpu);
+        if (vmx->nested.current_vmptr == -1ull)
+                return nested_vmx_failInvalid(vcpu);
 
         if (vmx_instruction_info & (1u << 10))
                 field_value = kvm_register_readl(vcpu,
@@ -8794,11 +8770,9 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
          * VMCS," then the "read-only" fields are actually read/write.
          */
         if (vmcs_field_readonly(field) &&
-            !nested_cpu_has_vmwrite_any_field(vcpu)) {
-                nested_vmx_failValid(vcpu,
-                        VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
-                return kvm_skip_emulated_instruction(vcpu);
-        }
+            !nested_cpu_has_vmwrite_any_field(vcpu))
+                return nested_vmx_failValid(vcpu,
+                        VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
 
         if (!is_guest_mode(vcpu))
                 vmcs12 = get_vmcs12(vcpu);
@@ -8807,18 +8781,14 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
                  * When vmcs->vmcs_link_pointer is -1ull, any VMWRITE
                  * to shadowed-field sets the ALU flags for VMfailInvalid.
                  */
-                if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull) {
-                        nested_vmx_failInvalid(vcpu);
-                        return kvm_skip_emulated_instruction(vcpu);
-                }
+                if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
+                        return nested_vmx_failInvalid(vcpu);
                 vmcs12 = get_shadow_vmcs12(vcpu);
         }
 
-        if (vmcs12_write_any(vmcs12, field, field_value) < 0) {
-                nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
-                return kvm_skip_emulated_instruction(vcpu);
-        }
+        if (vmcs12_write_any(vmcs12, field, field_value) < 0)
+                return nested_vmx_failValid(vcpu,
+                        VMXERR_UNSUPPORTED_VMCS_COMPONENT);
 
         /*
          * Do not track vmcs12 dirty-state if in guest-mode
@@ -8840,8 +8810,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
                 }
         }
 
-        nested_vmx_succeed(vcpu);
-        return kvm_skip_emulated_instruction(vcpu);
+        return nested_vmx_succeed(vcpu);
 }
 
 static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
@@ -8869,33 +8838,29 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
         if (nested_vmx_get_vmptr(vcpu, &vmptr))
                 return 1;
 
-        if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
-                nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
-                return kvm_skip_emulated_instruction(vcpu);
-        }
+        if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
+                return nested_vmx_failValid(vcpu,
+                        VMXERR_VMPTRLD_INVALID_ADDRESS);
 
-        if (vmptr == vmx->nested.vmxon_ptr) {
-                nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_VMXON_POINTER);
-                return kvm_skip_emulated_instruction(vcpu);
-        }
+        if (vmptr == vmx->nested.vmxon_ptr)
+                return nested_vmx_failValid(vcpu,
+                        VMXERR_VMPTRLD_VMXON_POINTER);
 
         if (vmx->nested.current_vmptr != vmptr) {
                 struct vmcs12 *new_vmcs12;
                 struct page *page;
 
                 page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
-                if (is_error_page(page)) {
-                        nested_vmx_failInvalid(vcpu);
-                        return kvm_skip_emulated_instruction(vcpu);
-                }
+                if (is_error_page(page))
+                        return nested_vmx_failInvalid(vcpu);
+
                 new_vmcs12 = kmap(page);
                 if (new_vmcs12->hdr.revision_id != VMCS12_REVISION ||
                     (new_vmcs12->hdr.shadow_vmcs &&
                      !nested_cpu_has_vmx_shadow_vmcs(vcpu))) {
                         kunmap(page);
                         kvm_release_page_clean(page);
-                        nested_vmx_failValid(vcpu,
-                                VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
-                        return kvm_skip_emulated_instruction(vcpu);
+                        return nested_vmx_failValid(vcpu,
+                                VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
                 }
 
                 nested_release_vmcs12(vmx);
@@ -8910,8 +8875,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
                 set_current_vmptr(vmx, vmptr);
         }
 
-        nested_vmx_succeed(vcpu);
-        return kvm_skip_emulated_instruction(vcpu);
+        return nested_vmx_succeed(vcpu);
 }
 
 /* Emulate the VMPTRST instruction */
@@ -8934,8 +8898,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
                 kvm_inject_page_fault(vcpu, &e);
                 return 1;
         }
-        nested_vmx_succeed(vcpu);
-        return kvm_skip_emulated_instruction(vcpu);
+        return nested_vmx_succeed(vcpu);
 }
 
 /* Emulate the INVEPT instruction */
@@ -8965,11 +8928,9 @@ static int handle_invept(struct kvm_vcpu *vcpu)
         types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
 
-        if (type >= 32 || !(types & (1 << type))) {
-                nested_vmx_failValid(vcpu,
-                                VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
-                return kvm_skip_emulated_instruction(vcpu);
-        }
+        if (type >= 32 || !(types & (1 << type)))
+                return nested_vmx_failValid(vcpu,
+                                VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
 
         /* According to the Intel VMX instruction reference, the memory
          * operand is read even if it isn't needed (e.g., for type==global)
@@ -8991,14 +8952,13 @@ static int handle_invept(struct kvm_vcpu *vcpu)
         case VMX_EPT_EXTENT_CONTEXT:
                 kvm_mmu_sync_roots(vcpu);
                 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
-                nested_vmx_succeed(vcpu);
                 break;
         default:
                 BUG_ON(1);
                 break;
         }
 
-        return kvm_skip_emulated_instruction(vcpu);
+        return nested_vmx_succeed(vcpu);
 }
 
 static u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
@@ -9037,11 +8997,9 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
         types = (vmx->nested.msrs.vpid_caps &
                         VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;
 
-        if (type >= 32 || !(types & (1 << type))) {
-                nested_vmx_failValid(vcpu,
-                        VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
-                return kvm_skip_emulated_instruction(vcpu);
-        }
+        if (type >= 32 || !(types & (1 << type)))
+                return nested_vmx_failValid(vcpu,
+                        VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
 
         /* according to the intel vmx instruction reference, the memory
          * operand is read even if it isn't needed (e.g., for type==global)
@@ -9053,21 +9011,17 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
                 kvm_inject_page_fault(vcpu, &e);
                 return 1;
         }
-        if (operand.vpid >> 16) {
-                nested_vmx_failValid(vcpu,
-                        VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
-                return kvm_skip_emulated_instruction(vcpu);
-        }
+        if (operand.vpid >> 16)
+                return nested_vmx_failValid(vcpu,
+                        VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
 
         vpid02 = nested_get_vpid02(vcpu);
         switch (type) {
         case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
                 if (!operand.vpid ||
-                    is_noncanonical_address(operand.gla, vcpu)) {
-                        nested_vmx_failValid(vcpu,
-                                VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
-                        return kvm_skip_emulated_instruction(vcpu);
-                }
+                    is_noncanonical_address(operand.gla, vcpu))
+                        return nested_vmx_failValid(vcpu,
+                                VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
                 if (cpu_has_vmx_invvpid_individual_addr()) {
                         __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR,
                                   vpid02, operand.gla);
@@ -9076,11 +9030,9 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
                 break;
         case VMX_VPID_EXTENT_SINGLE_CONTEXT:
         case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
-                if (!operand.vpid) {
-                        nested_vmx_failValid(vcpu,
-                                VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
-                        return kvm_skip_emulated_instruction(vcpu);
-                }
+                if (!operand.vpid)
+                        return nested_vmx_failValid(vcpu,
+                                VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
                 __vmx_flush_tlb(vcpu, vpid02, false);
                 break;
         case VMX_VPID_EXTENT_ALL_CONTEXT:
@@ -9091,9 +9043,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
                 return kvm_skip_emulated_instruction(vcpu);
         }
 
-        nested_vmx_succeed(vcpu);
-        return kvm_skip_emulated_instruction(vcpu);
+        return nested_vmx_succeed(vcpu);
 }
 
 static int handle_invpcid(struct kvm_vcpu *vcpu)
@@ -12806,8 +12756,8 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
         if (!nested_vmx_check_permission(vcpu))
                 return 1;
 
-        if (!nested_vmx_check_vmcs12(vcpu))
-                goto out;
+        if (vmx->nested.current_vmptr == -1ull)
+                return nested_vmx_failInvalid(vcpu);
 
         vmcs12 = get_vmcs12(vcpu);
@@ -12817,10 +12767,8 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
          * rather than RFLAGS.ZF, and no error number is stored to the
          * VM-instruction error field.
          */
-        if (vmcs12->hdr.shadow_vmcs) {
-                nested_vmx_failInvalid(vcpu);
-                goto out;
-        }
+        if (vmcs12->hdr.shadow_vmcs)
+                return nested_vmx_failInvalid(vcpu);
 
         if (enable_shadow_vmcs)
                 copy_shadow_to_vmcs12(vmx);
@@ -12835,24 +12783,18 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
          * for misconfigurations which will anyway be caught by the processor
          * when using the merged vmcs02.
          */
-        if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS) {
-                nested_vmx_failValid(vcpu,
-                        VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
-                goto out;
-        }
+        if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS)
+                return nested_vmx_failValid(vcpu,
+                        VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
 
-        if (vmcs12->launch_state == launch) {
-                nested_vmx_failValid(vcpu,
-                        launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
-                               : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
-                goto out;
-        }
+        if (vmcs12->launch_state == launch)
+                return nested_vmx_failValid(vcpu,
+                        launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
+                               : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
 
         ret = check_vmentry_prereqs(vcpu, vmcs12);
-        if (ret) {
-                nested_vmx_failValid(vcpu, ret);
-                goto out;
-        }
+        if (ret)
+                return nested_vmx_failValid(vcpu, ret);
 
         /*
          * We're finally done with prerequisite checking, and can start with
@@ -12891,9 +12833,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
                 return kvm_vcpu_halt(vcpu);
         }
         return 1;
-
-out:
-        return kvm_skip_emulated_instruction(vcpu);
 }
 
 /*
@@ -13622,7 +13561,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
                 return;
         }
 
         /*
          * After an early L2 VM-entry failure, we're now back
          * in L1 which thinks it just finished a VMLAUNCH or
@@ -13630,9 +13569,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
          * flag and the VM-instruction error field of the VMCS
          * accordingly, and skip the emulated instruction.
          */
-        nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
-        kvm_skip_emulated_instruction(vcpu);
+        (void)nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
 
         /*
          * Restore L1's host state to KVM's software model. We're here
...