Commit c45dcc71 authored by Ashok Raj, committed by Paolo Bonzini

KVM: VMX: enable guest access to LMCE related MSRs

On Intel platforms, this patch adds LMCE to KVM MCE supported
capabilities and handles guest access to LMCE related MSRs.
Signed-off-by: Ashok Raj <ashok.raj@intel.com>
[Haozhong: macro KVM_MCE_CAP_SUPPORTED => variable kvm_mce_cap_supported
           Only enable LMCE on Intel platform
           Check MSR_IA32_FEATURE_CONTROL when handling guest
             access to MSR_IA32_MCG_EXT_CTL]
Signed-off-by: Haozhong Zhang <haozhong.zhang@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent commit: 37e4c997
...@@ -598,6 +598,7 @@ struct kvm_vcpu_arch { ...@@ -598,6 +598,7 @@ struct kvm_vcpu_arch {
u64 mcg_cap; u64 mcg_cap;
u64 mcg_status; u64 mcg_status;
u64 mcg_ctl; u64 mcg_ctl;
u64 mcg_ext_ctl;
u64 *mce_banks; u64 *mce_banks;
/* Cache MMIO info */ /* Cache MMIO info */
...@@ -1008,6 +1009,8 @@ struct kvm_x86_ops { ...@@ -1008,6 +1009,8 @@ struct kvm_x86_ops {
int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc); int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc);
void (*cancel_hv_timer)(struct kvm_vcpu *vcpu); void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);
void (*setup_mce)(struct kvm_vcpu *vcpu);
}; };
struct kvm_arch_async_pf { struct kvm_arch_async_pf {
...@@ -1082,6 +1085,8 @@ extern u64 kvm_max_tsc_scaling_ratio; ...@@ -1082,6 +1085,8 @@ extern u64 kvm_max_tsc_scaling_ratio;
/* 1ull << kvm_tsc_scaling_ratio_frac_bits */ /* 1ull << kvm_tsc_scaling_ratio_frac_bits */
extern u64 kvm_default_tsc_scaling_ratio; extern u64 kvm_default_tsc_scaling_ratio;
extern u64 kvm_mce_cap_supported;
enum emulation_result { enum emulation_result {
EMULATE_DONE, /* no further processing */ EMULATE_DONE, /* no further processing */
EMULATE_USER_EXIT, /* kvm_run ready for userspace exit */ EMULATE_USER_EXIT, /* kvm_run ready for userspace exit */
......
...@@ -2984,6 +2984,13 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) ...@@ -2984,6 +2984,13 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 1; return 1;
msr_info->data = vmcs_read64(GUEST_BNDCFGS); msr_info->data = vmcs_read64(GUEST_BNDCFGS);
break; break;
case MSR_IA32_MCG_EXT_CTL:
if (!msr_info->host_initiated &&
!(to_vmx(vcpu)->msr_ia32_feature_control &
FEATURE_CONTROL_LMCE))
return 1;
msr_info->data = vcpu->arch.mcg_ext_ctl;
break;
case MSR_IA32_FEATURE_CONTROL: case MSR_IA32_FEATURE_CONTROL:
msr_info->data = to_vmx(vcpu)->msr_ia32_feature_control; msr_info->data = to_vmx(vcpu)->msr_ia32_feature_control;
break; break;
...@@ -3075,6 +3082,14 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) ...@@ -3075,6 +3082,14 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_TSC_ADJUST: case MSR_IA32_TSC_ADJUST:
ret = kvm_set_msr_common(vcpu, msr_info); ret = kvm_set_msr_common(vcpu, msr_info);
break; break;
case MSR_IA32_MCG_EXT_CTL:
if ((!msr_info->host_initiated &&
!(to_vmx(vcpu)->msr_ia32_feature_control &
FEATURE_CONTROL_LMCE)) ||
(data & ~MCG_EXT_CTL_LMCE_EN))
return 1;
vcpu->arch.mcg_ext_ctl = data;
break;
case MSR_IA32_FEATURE_CONTROL: case MSR_IA32_FEATURE_CONTROL:
if (!vmx_feature_control_msr_valid(vcpu, data) || if (!vmx_feature_control_msr_valid(vcpu, data) ||
(to_vmx(vcpu)->msr_ia32_feature_control & (to_vmx(vcpu)->msr_ia32_feature_control &
...@@ -6484,6 +6499,8 @@ static __init int hardware_setup(void) ...@@ -6484,6 +6499,8 @@ static __init int hardware_setup(void)
kvm_set_posted_intr_wakeup_handler(wakeup_handler); kvm_set_posted_intr_wakeup_handler(wakeup_handler);
kvm_mce_cap_supported |= MCG_LMCE_P;
return alloc_kvm_area(); return alloc_kvm_area();
out8: out8:
...@@ -11109,6 +11126,16 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq, ...@@ -11109,6 +11126,16 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
return ret; return ret;
} }
static void vmx_setup_mce(struct kvm_vcpu *vcpu)
{
if (vcpu->arch.mcg_cap & MCG_LMCE_P)
to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
FEATURE_CONTROL_LMCE;
else
to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
~FEATURE_CONTROL_LMCE;
}
static struct kvm_x86_ops vmx_x86_ops = { static struct kvm_x86_ops vmx_x86_ops = {
.cpu_has_kvm_support = cpu_has_kvm_support, .cpu_has_kvm_support = cpu_has_kvm_support,
.disabled_by_bios = vmx_disabled_by_bios, .disabled_by_bios = vmx_disabled_by_bios,
...@@ -11238,6 +11265,8 @@ static struct kvm_x86_ops vmx_x86_ops = { ...@@ -11238,6 +11265,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
.set_hv_timer = vmx_set_hv_timer, .set_hv_timer = vmx_set_hv_timer,
.cancel_hv_timer = vmx_cancel_hv_timer, .cancel_hv_timer = vmx_cancel_hv_timer,
#endif #endif
.setup_mce = vmx_setup_mce,
}; };
static int __init vmx_init(void) static int __init vmx_init(void)
......
...@@ -70,7 +70,8 @@ ...@@ -70,7 +70,8 @@
#define MAX_IO_MSRS 256 #define MAX_IO_MSRS 256
#define KVM_MAX_MCE_BANKS 32 #define KVM_MAX_MCE_BANKS 32
#define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P) u64 __read_mostly kvm_mce_cap_supported = MCG_CTL_P | MCG_SER_P;
EXPORT_SYMBOL_GPL(kvm_mce_cap_supported);
#define emul_to_vcpu(ctxt) \ #define emul_to_vcpu(ctxt) \
container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt) container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)
...@@ -984,6 +985,7 @@ static u32 emulated_msrs[] = { ...@@ -984,6 +985,7 @@ static u32 emulated_msrs[] = {
MSR_IA32_MISC_ENABLE, MSR_IA32_MISC_ENABLE,
MSR_IA32_MCG_STATUS, MSR_IA32_MCG_STATUS,
MSR_IA32_MCG_CTL, MSR_IA32_MCG_CTL,
MSR_IA32_MCG_EXT_CTL,
MSR_IA32_SMBASE, MSR_IA32_SMBASE,
}; };
...@@ -2685,11 +2687,9 @@ long kvm_arch_dev_ioctl(struct file *filp, ...@@ -2685,11 +2687,9 @@ long kvm_arch_dev_ioctl(struct file *filp,
break; break;
} }
case KVM_X86_GET_MCE_CAP_SUPPORTED: { case KVM_X86_GET_MCE_CAP_SUPPORTED: {
u64 mce_cap;
mce_cap = KVM_MCE_CAP_SUPPORTED;
r = -EFAULT; r = -EFAULT;
if (copy_to_user(argp, &mce_cap, sizeof mce_cap)) if (copy_to_user(argp, &kvm_mce_cap_supported,
sizeof(kvm_mce_cap_supported)))
goto out; goto out;
r = 0; r = 0;
break; break;
...@@ -2872,7 +2872,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu, ...@@ -2872,7 +2872,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
r = -EINVAL; r = -EINVAL;
if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS) if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
goto out; goto out;
if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000)) if (mcg_cap & ~(kvm_mce_cap_supported | 0xff | 0xff0000))
goto out; goto out;
r = 0; r = 0;
vcpu->arch.mcg_cap = mcg_cap; vcpu->arch.mcg_cap = mcg_cap;
...@@ -2882,6 +2882,9 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu, ...@@ -2882,6 +2882,9 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
/* Init IA32_MCi_CTL to all 1s */ /* Init IA32_MCi_CTL to all 1s */
for (bank = 0; bank < bank_num; bank++) for (bank = 0; bank < bank_num; bank++)
vcpu->arch.mce_banks[bank*4] = ~(u64)0; vcpu->arch.mce_banks[bank*4] = ~(u64)0;
if (kvm_x86_ops->setup_mce)
kvm_x86_ops->setup_mce(vcpu);
out: out:
return r; return r;
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册