Commit 3899152c authored by David Matlack, committed by Paolo Bonzini

KVM: nVMX: fix checks on CR{0,4} during virtual VMX operation

KVM emulates MSR_IA32_VMX_CR{0,4}_FIXED1 with the value -1ULL, meaning
all CR0 and CR4 bits are allowed to be 1 during VMX operation.

This does not match real hardware, which disallows the high 32 bits of
CR0 to be 1, and disallows reserved bits of CR4 to be 1 (including bits
which are defined in the SDM but missing according to CPUID). A guest
can induce a VM-entry failure by setting these bits in GUEST_CR0 and
GUEST_CR4, despite MSR_IA32_VMX_CR{0,4}_FIXED1 indicating they are
valid.

Since KVM has allowed all bits to be 1 in CR0 and CR4, the existing
checks on these registers do not verify must-be-0 bits. Fix these checks
to identify must-be-0 bits according to MSR_IA32_VMX_CR{0,4}_FIXED1.

This patch should introduce no change in behavior in KVM, since these
MSRs are still -1ULL.
Signed-off-by: David Matlack <dmatlack@google.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
Parent 62cc6b9d
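The fixed_bits_valid() helper added in the diff below encodes the FIXED0/FIXED1 rule described above in a single expression: bits set in fixed0 must be 1 in the tested value, and bits clear in fixed1 must be 0. As a minimal, self-contained illustration of that check, the user-space sketch below exercises it with example CR0 MSR values (PE|NE|PG as must-be-1, bits 63:32 as must-be-0); these values are illustrative only, not read from hardware:

#include <stdint.h>
#include <stdio.h>

/* Same rule as the patch's fixed_bits_valid() helper:
 * if fixed0[i] == 1, val[i] must be 1;
 * if fixed1[i] == 0, val[i] must be 0.
 */
static int fixed_bits_valid(uint64_t val, uint64_t fixed0, uint64_t fixed1)
{
	return ((val & fixed1) | fixed0) == val;
}

int main(void)
{
	/* Illustrative MSR contents: PG|NE|PE must be 1, and the
	 * high 32 bits of CR0 must be 0.
	 */
	uint64_t cr0_fixed0 = 0x0000000080000021ULL; /* PG | NE | PE */
	uint64_t cr0_fixed1 = 0x00000000ffffffffULL; /* bits 63:32 must be 0 */

	/* PG|NE|PE set, nothing else: accepted (prints 1). */
	printf("%d\n", fixed_bits_valid(0x80000021ULL,
					cr0_fixed0, cr0_fixed1));

	/* Same value with bit 32 set: rejected (prints 0), because
	 * fixed1 marks the high 32 bits as must-be-0 -- exactly the
	 * class of invalid CR0 values the old check let through.
	 */
	printf("%d\n", fixed_bits_valid(0x100000000ULL | 0x80000021ULL,
					cr0_fixed0, cr0_fixed1));
	return 0;
}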
@@ -2892,12 +2892,18 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
 	vmx->nested.nested_vmx_vmcs_enum = 0x2e;
 }
 
+/*
+ * if fixed0[i] == 1: val[i] must be 1
+ * if fixed1[i] == 0: val[i] must be 0
+ */
+static inline bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1)
+{
+	return ((val & fixed1) | fixed0) == val;
+}
+
 static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
 {
-	/*
-	 * Bits 0 in high must be 0, and bits 1 in low must be 1.
-	 */
-	return ((control & high) | low) == control;
+	return fixed_bits_valid(control, low, high);
 }
 
 static inline u64 vmx_control_msr(u32 low, u32 high)
@@ -4132,6 +4138,40 @@ static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
 			   (unsigned long *)&vcpu->arch.regs_dirty);
 }
 
+static bool nested_guest_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
+{
+	u64 fixed0 = to_vmx(vcpu)->nested.nested_vmx_cr0_fixed0;
+	u64 fixed1 = to_vmx(vcpu)->nested.nested_vmx_cr0_fixed1;
+	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+	if (to_vmx(vcpu)->nested.nested_vmx_secondary_ctls_high &
+		SECONDARY_EXEC_UNRESTRICTED_GUEST &&
+	    nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
+		fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);
+
+	return fixed_bits_valid(val, fixed0, fixed1);
+}
+
+static bool nested_host_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
+{
+	u64 fixed0 = to_vmx(vcpu)->nested.nested_vmx_cr0_fixed0;
+	u64 fixed1 = to_vmx(vcpu)->nested.nested_vmx_cr0_fixed1;
+
+	return fixed_bits_valid(val, fixed0, fixed1);
+}
+
+static bool nested_cr4_valid(struct kvm_vcpu *vcpu, unsigned long val)
+{
+	u64 fixed0 = to_vmx(vcpu)->nested.nested_vmx_cr4_fixed0;
+	u64 fixed1 = to_vmx(vcpu)->nested.nested_vmx_cr4_fixed1;
+
+	return fixed_bits_valid(val, fixed0, fixed1);
+}
+
+/* No difference in the restrictions on guest and host CR4 in VMX operation. */
+#define nested_guest_cr4_valid	nested_cr4_valid
+#define nested_host_cr4_valid	nested_cr4_valid
+
 static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 
 static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
@@ -4260,8 +4300,8 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 		if (!nested_vmx_allowed(vcpu))
 			return 1;
 	}
-	if (to_vmx(vcpu)->nested.vmxon &&
-	    ((cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON))
+
+	if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4))
 		return 1;
 
 	vcpu->arch.cr4 = cr4;
@@ -5826,18 +5866,6 @@ vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
 	hypercall[2] = 0xc1;
 }
 
-static bool nested_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
-{
-	unsigned long always_on = VMXON_CR0_ALWAYSON;
-	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-
-	if (to_vmx(vcpu)->nested.nested_vmx_secondary_ctls_high &
-		SECONDARY_EXEC_UNRESTRICTED_GUEST &&
-	    nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
-		always_on &= ~(X86_CR0_PE | X86_CR0_PG);
-	return (val & always_on) == always_on;
-}
-
 /* called to set cr0 as appropriate for a mov-to-cr0 exit. */
 static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
 {
@@ -5856,7 +5884,7 @@ static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
 		val = (val & ~vmcs12->cr0_guest_host_mask) |
 			(vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask);
 
-		if (!nested_cr0_valid(vcpu, val))
+		if (!nested_guest_cr0_valid(vcpu, val))
 			return 1;
 
 		if (kvm_set_cr0(vcpu, val))
@@ -5865,8 +5893,9 @@ static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
 		return 0;
 	} else {
 		if (to_vmx(vcpu)->nested.vmxon &&
-		    ((val & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON))
+		    !nested_host_cr0_valid(vcpu, val))
 			return 1;
+
 		return kvm_set_cr0(vcpu, val);
 	}
 }
@@ -10325,15 +10354,15 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 		goto out;
 	}
 
-	if (((vmcs12->host_cr0 & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON) ||
-	    ((vmcs12->host_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
+	if (!nested_host_cr0_valid(vcpu, vmcs12->host_cr0) ||
+	    !nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) {
 		nested_vmx_failValid(vcpu,
 			VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
 		goto out;
 	}
 
-	if (!nested_cr0_valid(vcpu, vmcs12->guest_cr0) ||
-	    ((vmcs12->guest_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
+	if (!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0) ||
+	    !nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)) {
 		nested_vmx_entry_failure(vcpu, vmcs12,
 			EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
 		goto out;
...