Commit e7d9513b authored by Andrey Smetanin, committed by Paolo Bonzini

kvm/x86: added hyper-v crash msrs into kvm hyperv context

Added kvm Hyper-V context hv crash variables as storage
of Hyper-V crash msrs.
Signed-off-by: Andrey Smetanin <asmetanin@virtuozzo.com>
Signed-off-by: Denis V. Lunev <den@openvz.org>
Reviewed-by: Peter Hornyack <peterhornyack@google.com>
CC: Paolo Bonzini <pbonzini@redhat.com>
CC: Gleb Natapov <gleb@kernel.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent: ee86dbc6
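For context, the storage added below backs the Hyper-V guest crash reporting flow: a guest reports a bugcheck by writing five parameter MSRs (HV_X64_MSR_CRASH_P0..P4) and then setting the notify bit in HV_X64_MSR_CRASH_CTL. The following is a minimal, illustrative guest-side sketch only; the MSR numbers and the notify bit follow the Hyper-V definitions used elsewhere in this series (they are not part of this diff), and wrmsr64() is a hypothetical helper, not kernel code.

/* Illustrative guest-side sketch (ring 0 inside a Hyper-V enlightened guest). */
#include <stdint.h>

#define HV_X64_MSR_CRASH_P0         0x40000100  /* P1..P4 follow contiguously */
#define HV_X64_MSR_CRASH_CTL        0x40000105
#define HV_X64_MSR_CRASH_CTL_NOTIFY (1ULL << 63)

static inline void wrmsr64(uint32_t msr, uint64_t val)
{
        asm volatile("wrmsr" : : "c"(msr), "a"((uint32_t)val),
                     "d"((uint32_t)(val >> 32)) : "memory");
}

static void hv_report_crash(const uint64_t param[5])
{
        int i;

        /* Stored by kvm_hv_msr_set_crash_data() into hv->hv_crash_param[]. */
        for (i = 0; i < 5; i++)
                wrmsr64(HV_X64_MSR_CRASH_P0 + i, param[i]);

        /*
         * A guest (non-host) write with the notify bit set makes
         * kvm_hv_msr_set_crash_ctl() raise KVM_REQ_HV_CRASH.
         */
        wrmsr64(HV_X64_MSR_CRASH_CTL, HV_X64_MSR_CRASH_CTL_NOTIFY);
}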
@@ -595,6 +595,10 @@ struct kvm_hv {
        u64 hv_guest_os_id;
        u64 hv_hypercall;
        u64 hv_tsc_page;
+
+       /* Hyper-v based guest crash (NT kernel bugcheck) parameters */
+       u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS];
+       u64 hv_crash_ctl;
 };
 
 struct kvm_arch {
@@ -39,6 +39,8 @@ static bool kvm_hv_msr_partition_wide(u32 msr)
        case HV_X64_MSR_HYPERCALL:
        case HV_X64_MSR_REFERENCE_TSC:
        case HV_X64_MSR_TIME_REF_COUNT:
+       case HV_X64_MSR_CRASH_CTL:
+       case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
                r = true;
                break;
        }
@@ -46,7 +48,63 @@ static bool kvm_hv_msr_partition_wide(u32 msr)
        return r;
 }
 
-static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
+                                    u32 index, u64 *pdata)
+{
+       struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
+
+       if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
+               return -EINVAL;
+
+       *pdata = hv->hv_crash_param[index];
+       return 0;
+}
+
+static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata)
+{
+       struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
+
+       *pdata = hv->hv_crash_ctl;
+       return 0;
+}
+
+static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host)
+{
+       struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
+
+       if (host)
+               hv->hv_crash_ctl = data & HV_X64_MSR_CRASH_CTL_NOTIFY;
+
+       if (!host && (data & HV_X64_MSR_CRASH_CTL_NOTIFY)) {
+
+               vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
+                         hv->hv_crash_param[0],
+                         hv->hv_crash_param[1],
+                         hv->hv_crash_param[2],
+                         hv->hv_crash_param[3],
+                         hv->hv_crash_param[4]);
+
+               /* Send notification about crash to user space */
+               kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
+       }
+
+       return 0;
+}
+
+static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
+                                    u32 index, u64 data)
+{
+       struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
+
+       if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
+               return -EINVAL;
+
+       hv->hv_crash_param[index] = data;
+       return 0;
+}
+
+static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
+                            bool host)
 {
        struct kvm *kvm = vcpu->kvm;
        struct kvm_hv *hv = &kvm->arch.hyperv;
@@ -99,6 +157,12 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                mark_page_dirty(kvm, gfn);
                break;
        }
+       case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
+               return kvm_hv_msr_set_crash_data(vcpu,
+                                                msr - HV_X64_MSR_CRASH_P0,
+                                                data);
+       case HV_X64_MSR_CRASH_CTL:
+               return kvm_hv_msr_set_crash_ctl(vcpu, data, host);
        default:
                vcpu_unimpl(vcpu, "Hyper-V uhandled wrmsr: 0x%x data 0x%llx\n",
                            msr, data);
@@ -171,6 +235,12 @@ static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
        case HV_X64_MSR_REFERENCE_TSC:
                data = hv->hv_tsc_page;
                break;
+       case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
+               return kvm_hv_msr_get_crash_data(vcpu,
+                                                msr - HV_X64_MSR_CRASH_P0,
+                                                pdata);
+       case HV_X64_MSR_CRASH_CTL:
+               return kvm_hv_msr_get_crash_ctl(vcpu, pdata);
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
                return 1;
@@ -215,13 +285,13 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
        return 0;
 }
 
-int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
 {
        if (kvm_hv_msr_partition_wide(msr)) {
                int r;
 
                mutex_lock(&vcpu->kvm->lock);
-               r = kvm_hv_set_msr_pw(vcpu, msr, data);
+               r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
                mutex_unlock(&vcpu->kvm->lock);
                return r;
        } else
@@ -24,7 +24,7 @@
 #ifndef __ARCH_X86_KVM_HYPERV_H__
 #define __ARCH_X86_KVM_HYPERV_H__
 
-int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
+int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host);
 int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
 bool kvm_hv_hypercall_enabled(struct kvm *kvm);
 int kvm_hv_hypercall(struct kvm_vcpu *vcpu);
@@ -950,6 +950,8 @@ static u32 emulated_msrs[] = {
        MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
        HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
        HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
+       HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2,
+       HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL,
        HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
        MSR_KVM_PV_EOI_EN,
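Adding the crash MSRs to emulated_msrs makes them visible through KVM_GET_MSR_INDEX_LIST, so a VMM can include them in its save/restore set. Below is a rough, hypothetical userspace sketch (it assumes an already-open vCPU fd; save_hv_crash_msrs() is an illustrative helper, not an existing API). Note that, per kvm_hv_msr_set_crash_ctl() above, a host-initiated write of HV_X64_MSR_CRASH_CTL only restores the stored control value and does not raise the crash request.

/* Hypothetical VMM-side sketch: save the six crash MSRs of one vCPU. */
#include <linux/kvm.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>

#define HV_X64_MSR_CRASH_P0  0x40000100
#define HV_X64_MSR_CRASH_CTL 0x40000105

static int save_hv_crash_msrs(int vcpu_fd, uint64_t out[6])
{
        struct kvm_msrs *msrs;
        int i, ret;

        msrs = calloc(1, sizeof(*msrs) + 6 * sizeof(struct kvm_msr_entry));
        if (!msrs)
                return -1;

        msrs->nmsrs = 6;
        for (i = 0; i < 5; i++)
                msrs->entries[i].index = HV_X64_MSR_CRASH_P0 + i;
        msrs->entries[5].index = HV_X64_MSR_CRASH_CTL;

        /* Host-initiated read; served by kvm_hv_msr_get_crash_*(). */
        ret = ioctl(vcpu_fd, KVM_GET_MSRS, msrs);
        if (ret == 6)
                for (i = 0; i < 6; i++)
                        out[i] = msrs->entries[i].data;

        free(msrs);
        return ret == 6 ? 0 : -1;
}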
@@ -2103,7 +2105,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                 */
                break;
        case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
-               return kvm_hv_set_msr_common(vcpu, msr, data);
+       case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
+       case HV_X64_MSR_CRASH_CTL:
+               return kvm_hv_set_msr_common(vcpu, msr, data,
+                                            msr_info->host_initiated);
        case MSR_IA32_BBL_CR_CTL3:
                /* Drop writes to this legacy MSR -- see rdmsr
                 * counterpart for further detail.
@@ -2302,6 +2307,8 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                msr_info->data = 0x20000000;
                break;
        case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
+       case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
+       case HV_X64_MSR_CRASH_CTL:
                return kvm_hv_get_msr_common(vcpu,
                                             msr_info->index, &msr_info->data);
                break;