提交 c4abb7c9 编写于 作者: J Jan Kiszka 提交者: Avi Kivity

KVM: x86: Support for user space injected NMIs

Introduces the KVM_NMI IOCTL to the generic x86 part of KVM for
injecting NMIs from user space and also extends the statistic report
accordingly.

Based on the original patch by Sheng Yang.
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Sheng Yang <sheng.yang@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
上级 26df99c6
...@@ -398,6 +398,7 @@ struct kvm_vcpu_stat { ...@@ -398,6 +398,7 @@ struct kvm_vcpu_stat {
u32 halt_exits; u32 halt_exits;
u32 halt_wakeup; u32 halt_wakeup;
u32 request_irq_exits; u32 request_irq_exits;
u32 request_nmi_exits;
u32 irq_exits; u32 irq_exits;
u32 host_state_reload; u32 host_state_reload;
u32 efer_reload; u32 efer_reload;
...@@ -406,6 +407,7 @@ struct kvm_vcpu_stat { ...@@ -406,6 +407,7 @@ struct kvm_vcpu_stat {
u32 insn_emulation_fail; u32 insn_emulation_fail;
u32 hypercalls; u32 hypercalls;
u32 irq_injections; u32 irq_injections;
u32 nmi_injections;
}; };
struct descriptor_table { struct descriptor_table {
......
...@@ -86,6 +86,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { ...@@ -86,6 +86,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "halt_wakeup", VCPU_STAT(halt_wakeup) }, { "halt_wakeup", VCPU_STAT(halt_wakeup) },
{ "hypercalls", VCPU_STAT(hypercalls) }, { "hypercalls", VCPU_STAT(hypercalls) },
{ "request_irq", VCPU_STAT(request_irq_exits) }, { "request_irq", VCPU_STAT(request_irq_exits) },
{ "request_nmi", VCPU_STAT(request_nmi_exits) },
{ "irq_exits", VCPU_STAT(irq_exits) }, { "irq_exits", VCPU_STAT(irq_exits) },
{ "host_state_reload", VCPU_STAT(host_state_reload) }, { "host_state_reload", VCPU_STAT(host_state_reload) },
{ "efer_reload", VCPU_STAT(efer_reload) }, { "efer_reload", VCPU_STAT(efer_reload) },
...@@ -93,6 +94,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { ...@@ -93,6 +94,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "insn_emulation", VCPU_STAT(insn_emulation) }, { "insn_emulation", VCPU_STAT(insn_emulation) },
{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) }, { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
{ "irq_injections", VCPU_STAT(irq_injections) }, { "irq_injections", VCPU_STAT(irq_injections) },
{ "nmi_injections", VCPU_STAT(nmi_injections) },
{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) }, { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
{ "mmu_pte_write", VM_STAT(mmu_pte_write) }, { "mmu_pte_write", VM_STAT(mmu_pte_write) },
{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) }, { "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
...@@ -1318,6 +1320,15 @@ static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, ...@@ -1318,6 +1320,15 @@ static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
return 0; return 0;
} }
/*
 * Handler for the KVM_NMI vcpu ioctl: queue one NMI for injection into
 * the guest on this vcpu.
 *
 * Returns 0; the actual injection is performed later on the next guest
 * entry by the pending-NMI machinery that kvm_inject_nmi() arms.
 */
static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);	/* bind vcpu state to this task before touching it */
	kvm_inject_nmi(vcpu);
	vcpu_put(vcpu);		/* must balance vcpu_load() on every path */
	return 0;
}
static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu, static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
struct kvm_tpr_access_ctl *tac) struct kvm_tpr_access_ctl *tac)
{ {
...@@ -1377,6 +1388,13 @@ long kvm_arch_vcpu_ioctl(struct file *filp, ...@@ -1377,6 +1388,13 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = 0; r = 0;
break; break;
} }
case KVM_NMI: {
r = kvm_vcpu_ioctl_nmi(vcpu);
if (r)
goto out;
r = 0;
break;
}
case KVM_SET_CPUID: { case KVM_SET_CPUID: {
struct kvm_cpuid __user *cpuid_arg = argp; struct kvm_cpuid __user *cpuid_arg = argp;
struct kvm_cpuid cpuid; struct kvm_cpuid cpuid;
...@@ -2812,18 +2830,37 @@ static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu, ...@@ -2812,18 +2830,37 @@ static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
(kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF)); (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
} }
/*
 * Decide whether we must exit to userspace so it can inject an NMI:
 * userspace asked for an NMI window (request_nmi_window) and the guest
 * is currently able to take an NMI (nmi_window_open).
 *
 * If an NMI is already queued in the kernel (nmi_pending), there is
 * nothing for userspace to do, so no exit is requested.
 */
static int dm_request_for_nmi_injection(struct kvm_vcpu *vcpu,
					struct kvm_run *kvm_run)
{
	if (vcpu->arch.nmi_pending)
		return 0;
	if (!kvm_run->request_nmi_window)
		return 0;
	return vcpu->arch.nmi_window_open != 0;
}
static void post_kvm_run_save(struct kvm_vcpu *vcpu, static void post_kvm_run_save(struct kvm_vcpu *vcpu,
struct kvm_run *kvm_run) struct kvm_run *kvm_run)
{ {
kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0; kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
kvm_run->cr8 = kvm_get_cr8(vcpu); kvm_run->cr8 = kvm_get_cr8(vcpu);
kvm_run->apic_base = kvm_get_apic_base(vcpu); kvm_run->apic_base = kvm_get_apic_base(vcpu);
if (irqchip_in_kernel(vcpu->kvm)) if (irqchip_in_kernel(vcpu->kvm)) {
kvm_run->ready_for_interrupt_injection = 1; kvm_run->ready_for_interrupt_injection = 1;
else kvm_run->ready_for_nmi_injection = 1;
} else {
kvm_run->ready_for_interrupt_injection = kvm_run->ready_for_interrupt_injection =
(vcpu->arch.interrupt_window_open && (vcpu->arch.interrupt_window_open &&
vcpu->arch.irq_summary == 0); vcpu->arch.irq_summary == 0);
kvm_run->ready_for_nmi_injection =
(vcpu->arch.nmi_window_open &&
vcpu->arch.nmi_pending == 0);
}
} }
static void vapic_enter(struct kvm_vcpu *vcpu) static void vapic_enter(struct kvm_vcpu *vcpu)
...@@ -2999,6 +3036,11 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) ...@@ -2999,6 +3036,11 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
} }
if (r > 0) { if (r > 0) {
if (dm_request_for_nmi_injection(vcpu, kvm_run)) {
r = -EINTR;
kvm_run->exit_reason = KVM_EXIT_NMI;
++vcpu->stat.request_nmi_exits;
}
if (dm_request_for_irq_injection(vcpu, kvm_run)) { if (dm_request_for_irq_injection(vcpu, kvm_run)) {
r = -EINTR; r = -EINTR;
kvm_run->exit_reason = KVM_EXIT_INTR; kvm_run->exit_reason = KVM_EXIT_INTR;
......
...@@ -83,18 +83,22 @@ struct kvm_irqchip { ...@@ -83,18 +83,22 @@ struct kvm_irqchip {
#define KVM_EXIT_S390_SIEIC 13 #define KVM_EXIT_S390_SIEIC 13
#define KVM_EXIT_S390_RESET 14 #define KVM_EXIT_S390_RESET 14
#define KVM_EXIT_DCR 15 #define KVM_EXIT_DCR 15
#define KVM_EXIT_NMI 16
#define KVM_EXIT_NMI_WINDOW_OPEN 17
/* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */ /* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */
struct kvm_run { struct kvm_run {
/* in */ /* in */
__u8 request_interrupt_window; __u8 request_interrupt_window;
__u8 padding1[7]; __u8 request_nmi_window;
__u8 padding1[6];
/* out */ /* out */
__u32 exit_reason; __u32 exit_reason;
__u8 ready_for_interrupt_injection; __u8 ready_for_interrupt_injection;
__u8 if_flag; __u8 if_flag;
__u8 padding2[2]; __u8 ready_for_nmi_injection;
__u8 padding2;
/* in (pre_kvm_run), out (post_kvm_run) */ /* in (pre_kvm_run), out (post_kvm_run) */
__u64 cr8; __u64 cr8;
...@@ -387,6 +391,7 @@ struct kvm_trace_rec { ...@@ -387,6 +391,7 @@ struct kvm_trace_rec {
#define KVM_CAP_DEVICE_ASSIGNMENT 17 #define KVM_CAP_DEVICE_ASSIGNMENT 17
#endif #endif
#define KVM_CAP_IOMMU 18 #define KVM_CAP_IOMMU 18
#define KVM_CAP_NMI 19
/* /*
* ioctls for VM fds * ioctls for VM fds
...@@ -458,6 +463,8 @@ struct kvm_trace_rec { ...@@ -458,6 +463,8 @@ struct kvm_trace_rec {
#define KVM_S390_INITIAL_RESET _IO(KVMIO, 0x97) #define KVM_S390_INITIAL_RESET _IO(KVMIO, 0x97)
#define KVM_GET_MP_STATE _IOR(KVMIO, 0x98, struct kvm_mp_state) #define KVM_GET_MP_STATE _IOR(KVMIO, 0x98, struct kvm_mp_state)
#define KVM_SET_MP_STATE _IOW(KVMIO, 0x99, struct kvm_mp_state) #define KVM_SET_MP_STATE _IOW(KVMIO, 0x99, struct kvm_mp_state)
/* Available with KVM_CAP_NMI */
#define KVM_NMI _IO(KVMIO, 0x9a)
#define KVM_TRC_INJ_VIRQ (KVM_TRC_HANDLER + 0x02) #define KVM_TRC_INJ_VIRQ (KVM_TRC_HANDLER + 0x02)
#define KVM_TRC_REDELIVER_EVT (KVM_TRC_HANDLER + 0x03) #define KVM_TRC_REDELIVER_EVT (KVM_TRC_HANDLER + 0x03)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册