Commit 384bb783, authored by Jan Kiszka, committed by Gleb Natapov

KVM: nVMX: Validate EFER values for VM_ENTRY/EXIT_LOAD_IA32_EFER

As we may emulate the loading of EFER on VM-entry and VM-exit, implement the checks that VMX performs on the guest and host values on vmlaunch/vmresume. Factor out kvm_valid_efer for this purpose; it rejects EFER values that have reserved bits set.
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Parent: ea8ceb83
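For reference, the IA32_EFER bit positions that the checks below refer to are architectural. The masks in this sketch are illustrative copies, not the kernel's headers, and EFER_ALLOWED is a simplification: KVM builds its real reserved-bit mask (efer_reserved_bits) at runtime via kvm_enable_efer_bits(), so the helper name and mask here are hypothetical.

#include <stdbool.h>
#include <stdint.h>

/* Architectural IA32_EFER bit positions (illustrative, not kernel headers). */
#define EFER_SCE	(1ULL << 0)	/* syscall enable */
#define EFER_LME	(1ULL << 8)	/* long mode enable */
#define EFER_LMA	(1ULL << 10)	/* long mode active */
#define EFER_NX		(1ULL << 11)	/* no-execute enable */
#define EFER_SVME	(1ULL << 12)	/* SVM enable (AMD) */
#define EFER_FFXSR	(1ULL << 14)	/* fast FXSAVE/FXRSTOR (AMD) */

/* Simplified stand-in for KVM's runtime efer_reserved_bits mask. */
#define EFER_ALLOWED	(EFER_SCE | EFER_LME | EFER_LMA | EFER_NX | \
			 EFER_SVME | EFER_FFXSR)

/* Hypothetical helper: true if any bit outside the allowed set is set. */
static inline bool efer_has_reserved_bits(uint64_t efer)
{
	return (efer & ~EFER_ALLOWED) != 0;
}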
@@ -809,6 +809,7 @@ static inline int emulate_instruction(struct kvm_vcpu *vcpu,
 }
 
 void kvm_enable_efer_bits(u64);
+bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
 int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
@@ -7558,6 +7558,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	int cpu;
 	struct loaded_vmcs *vmcs02;
+	bool ia32e;
 
 	if (!nested_vmx_check_permission(vcpu) ||
 	    !nested_vmx_check_vmcs12(vcpu))
@@ -7648,6 +7649,45 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 		return 1;
 	}
 
+	/*
+	 * If the load IA32_EFER VM-entry control is 1, the following checks
+	 * are performed on the field for the IA32_EFER MSR:
+	 * - Bits reserved in the IA32_EFER MSR must be 0.
+	 * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of
+	 *   the IA-32e mode guest VM-exit control. It must also be identical
+	 *   to bit 8 (LME) if bit 31 in the CR0 field (corresponding to
+	 *   CR0.PG) is 1.
+	 */
+	if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER) {
+		ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
+		if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) ||
+		    ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) ||
+		    ((vmcs12->guest_cr0 & X86_CR0_PG) &&
+		     ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))) {
+			nested_vmx_entry_failure(vcpu, vmcs12,
+				EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
+			return 1;
+		}
+	}
+
+	/*
+	 * If the load IA32_EFER VM-exit control is 1, bits reserved in the
+	 * IA32_EFER MSR must be 0 in the field for that register. In addition,
+	 * the values of the LMA and LME bits in the field must each be that of
+	 * the host address-space size VM-exit control.
+	 */
+	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
+		ia32e = (vmcs12->vm_exit_controls &
+			 VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0;
+		if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) ||
+		    ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) ||
+		    ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)) {
+			nested_vmx_entry_failure(vcpu, vmcs12,
+				EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
+			return 1;
+		}
+	}
+
 	/*
 	 * We're finally done with prerequisite checking, and can start with
 	 * the nested entry.
 	 */
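Read in isolation, the two blocks above enforce the guest-side and host-side EFER consistency rules described in their comments. Below is a minimal, self-contained sketch of just the LMA/LME consistency part; the function and parameter names are illustrative, not KVM's, and the reserved-bit check performed by kvm_valid_efer() is omitted.

#include <stdbool.h>
#include <stdint.h>

#define EFER_LME	(1ULL << 8)	/* long mode enable */
#define EFER_LMA	(1ULL << 10)	/* long mode active */
#define X86_CR0_PG	(1ULL << 31)	/* paging enabled */

/* Guest EFER vs. the "IA-32e mode guest" VM-entry control. */
static bool guest_efer_consistent(uint64_t efer, uint64_t cr0, bool ia32e_guest)
{
	/* LMA must mirror the IA-32e mode guest control. */
	if (ia32e_guest != !!(efer & EFER_LMA))
		return false;
	/* With CR0.PG = 1, LME must agree with that control as well. */
	if ((cr0 & X86_CR0_PG) && ia32e_guest != !!(efer & EFER_LME))
		return false;
	return true;
}

/* Host EFER vs. the "host address-space size" VM-exit control. */
static bool host_efer_consistent(uint64_t efer, bool host_64bit)
{
	return host_64bit == !!(efer & EFER_LMA) &&
	       host_64bit == !!(efer & EFER_LME);
}

In the patch, either inconsistency makes nested_vmx_entry_failure() report EXIT_REASON_INVALID_STATE, so the nested vmlaunch/vmresume fails instead of entering L2 with illegal state.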
@@ -845,23 +845,17 @@ static const u32 emulated_msrs[] = {
 	MSR_IA32_MCG_CTL,
 };
 
-static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
+bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
-	u64 old_efer = vcpu->arch.efer;
-
 	if (efer & efer_reserved_bits)
-		return 1;
-
-	if (is_paging(vcpu)
-	    && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
-		return 1;
+		return false;
 
 	if (efer & EFER_FFXSR) {
 		struct kvm_cpuid_entry2 *feat;
 
 		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
 		if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
-			return 1;
+			return false;
 	}
 
 	if (efer & EFER_SVME) {
@@ -869,9 +863,24 @@ static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
 
 		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
 		if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
-			return 1;
+			return false;
 	}
 
+	return true;
+}
+EXPORT_SYMBOL_GPL(kvm_valid_efer);
+
+static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
+{
+	u64 old_efer = vcpu->arch.efer;
+
+	if (!kvm_valid_efer(vcpu, efer))
+		return 1;
+
+	if (is_paging(vcpu)
+	    && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
+		return 1;
+
 	efer &= ~EFER_LMA;
 	efer |= vcpu->arch.efer & EFER_LMA;
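Note that the is_paging()/EFER_LME toggle check stays behind in set_efer(): it constrains how a live EFER may be changed through WRMSR while paging is enabled, whereas kvm_valid_efer() only decides whether a given EFER value is acceptable at all (no reserved bits, and FFXSR/SVME only when the corresponding CPUID features are exposed). That weaker predicate is what the nested VM-entry and VM-exit checks need, since there a complete new EFER image is loaded rather than modified in place.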