Commit 48005f64 authored by Jan Kiszka, committed by Avi Kivity

KVM: x86: Save&restore interrupt shadow mask

The interrupt shadow created by STI or MOV-SS-like operations is part of
the VCPU state and must be preserved across migration. Transfer it in
the spare padding field of kvm_vcpu_events.interrupt.

As a side effect, vmx_set_interrupt_shadow now has to be robust against both
shadow types being set at once. Give MOV SS the higher priority and skip STI
in that case so that VMX does not raise a fault on the next entry.
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Parent: 03b82a30
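For context, a minimal userspace sketch (not part of this patch) of how a VMM could carry the interrupt shadow across a save/restore cycle with the extended ioctls. The helper name transfer_intr_shadow and the kvm_fd/vcpu_fd descriptors are illustrative assumptions; error handling is elided.

/*
 * Hedged sketch: save and restore the interrupt shadow via
 * KVM_GET/SET_VCPU_EVENTS (requires KVM_CAP_VCPU_EVENTS; the shadow
 * field itself is advertised by KVM_CAP_INTR_SHADOW).
 */
#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int transfer_intr_shadow(int kvm_fd, int vcpu_fd)
{
	struct kvm_vcpu_events events;

	memset(&events, 0, sizeof(events));

	/* Source side: the kernel fills interrupt.shadow and sets
	 * KVM_VCPUEVENT_VALID_SHADOW in events.flags. */
	if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
		return -1;

	/* Destination side: only ask for the shadow to be written back
	 * when the kernel advertises KVM_CAP_INTR_SHADOW; older kernels
	 * reject unknown flag bits with -EINVAL. */
	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_INTR_SHADOW) <= 0)
		events.flags &= ~KVM_VCPUEVENT_VALID_SHADOW;

	return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
}

In a real migration flow the GET would run on the source host and the SET on the destination, with the struct serialized in between; both calls are shown inline here only to keep the sketch self-contained.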
@@ -656,6 +656,7 @@ struct kvm_clock_data {
 4.29 KVM_GET_VCPU_EVENTS
 Capability: KVM_CAP_VCPU_EVENTS
+Extended by: KVM_CAP_INTR_SHADOW
 Architectures: x86
 Type: vm ioctl
 Parameters: struct kvm_vcpu_event (out)
@@ -676,7 +677,7 @@ struct kvm_vcpu_events {
 		__u8 injected;
 		__u8 nr;
 		__u8 soft;
-		__u8 pad;
+		__u8 shadow;
 	} interrupt;
 	struct {
 		__u8 injected;
@@ -688,9 +689,13 @@ struct kvm_vcpu_events {
 	__u32 flags;
 };
+KVM_VCPUEVENT_VALID_SHADOW may be set in the flags field to signal that
+interrupt.shadow contains a valid state. Otherwise, this field is undefined.
+
 4.30 KVM_SET_VCPU_EVENTS
 Capability: KVM_CAP_VCPU_EVENTS
+Extended by: KVM_CAP_INTR_SHADOW
 Architectures: x86
 Type: vm ioctl
 Parameters: struct kvm_vcpu_event (in)
@@ -709,6 +714,10 @@ current in-kernel state. The bits are:
 KVM_VCPUEVENT_VALID_NMI_PENDING - transfer nmi.pending to the kernel
 KVM_VCPUEVENT_VALID_SIPI_VECTOR - transfer sipi_vector
+If KVM_CAP_INTR_SHADOW is available, KVM_VCPUEVENT_VALID_SHADOW can be set in
+the flags field to signal that interrupt.shadow contains a valid state and
+shall be written into the VCPU.
+
 5. The kvm_run structure
......
@@ -257,6 +257,11 @@ struct kvm_reinject_control {
 /* When set in flags, include corresponding fields on KVM_SET_VCPU_EVENTS */
 #define KVM_VCPUEVENT_VALID_NMI_PENDING 0x00000001
 #define KVM_VCPUEVENT_VALID_SIPI_VECTOR 0x00000002
+#define KVM_VCPUEVENT_VALID_SHADOW 0x00000004
+
+/* Interrupt shadow states */
+#define KVM_X86_SHADOW_INT_MOV_SS 0x01
+#define KVM_X86_SHADOW_INT_STI 0x02
 /* for KVM_GET/SET_VCPU_EVENTS */
 struct kvm_vcpu_events {
@@ -271,7 +276,7 @@ struct kvm_vcpu_events {
 		__u8 injected;
 		__u8 nr;
 		__u8 soft;
-		__u8 pad;
+		__u8 shadow;
 	} interrupt;
 	struct {
 		__u8 injected;
......
@@ -153,9 +153,6 @@ struct decode_cache {
 	struct fetch_cache fetch;
 };
-#define X86_SHADOW_INT_MOV_SS 1
-#define X86_SHADOW_INT_STI 2
 struct x86_emulate_ctxt {
 	/* Register state before/after emulation. */
 	struct kvm_vcpu *vcpu;
......
@@ -2128,7 +2128,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 		}
 		if (c->modrm_reg == VCPU_SREG_SS)
-			toggle_interruptibility(ctxt, X86_SHADOW_INT_MOV_SS);
+			toggle_interruptibility(ctxt, KVM_X86_SHADOW_INT_MOV_SS);
 		rc = kvm_load_segment_descriptor(ctxt->vcpu, sel, c->modrm_reg);
@@ -2366,7 +2366,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 		if (emulator_bad_iopl(ctxt))
 			kvm_inject_gp(ctxt->vcpu, 0);
 		else {
-			toggle_interruptibility(ctxt, X86_SHADOW_INT_STI);
+			toggle_interruptibility(ctxt, KVM_X86_SHADOW_INT_STI);
 			ctxt->eflags |= X86_EFLAGS_IF;
 			c->dst.type = OP_NONE; /* Disable writeback. */
 		}
......
@@ -265,7 +265,7 @@ static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
 	u32 ret = 0;
 	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
-		ret |= X86_SHADOW_INT_STI | X86_SHADOW_INT_MOV_SS;
+		ret |= KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
 	return ret & mask;
 }
......
@@ -846,9 +846,9 @@ static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
 	int ret = 0;
 	if (interruptibility & GUEST_INTR_STATE_STI)
-		ret |= X86_SHADOW_INT_STI;
+		ret |= KVM_X86_SHADOW_INT_STI;
 	if (interruptibility & GUEST_INTR_STATE_MOV_SS)
-		ret |= X86_SHADOW_INT_MOV_SS;
+		ret |= KVM_X86_SHADOW_INT_MOV_SS;
 	return ret & mask;
 }
@@ -860,9 +860,9 @@ static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
 	interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
-	if (mask & X86_SHADOW_INT_MOV_SS)
+	if (mask & KVM_X86_SHADOW_INT_MOV_SS)
 		interruptibility |= GUEST_INTR_STATE_MOV_SS;
-	if (mask & X86_SHADOW_INT_STI)
+	else if (mask & KVM_X86_SHADOW_INT_STI)
 		interruptibility |= GUEST_INTR_STATE_STI;
 	if ((interruptibility != interruptibility_old))
......
@@ -2111,6 +2111,9 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 		vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
 	events->interrupt.nr = vcpu->arch.interrupt.nr;
 	events->interrupt.soft = 0;
+	events->interrupt.shadow =
+		kvm_x86_ops->get_interrupt_shadow(vcpu,
+			KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI);
 	events->nmi.injected = vcpu->arch.nmi_injected;
 	events->nmi.pending = vcpu->arch.nmi_pending;
@@ -2119,7 +2122,8 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 	events->sipi_vector = vcpu->arch.sipi_vector;
 	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
-			 | KVM_VCPUEVENT_VALID_SIPI_VECTOR);
+			 | KVM_VCPUEVENT_VALID_SIPI_VECTOR
+			 | KVM_VCPUEVENT_VALID_SHADOW);
 	vcpu_put(vcpu);
 }
@@ -2128,7 +2132,8 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 					      struct kvm_vcpu_events *events)
 {
 	if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
-			      | KVM_VCPUEVENT_VALID_SIPI_VECTOR))
+			      | KVM_VCPUEVENT_VALID_SIPI_VECTOR
+			      | KVM_VCPUEVENT_VALID_SHADOW))
 		return -EINVAL;
 	vcpu_load(vcpu);
@@ -2143,6 +2148,9 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 	vcpu->arch.interrupt.soft = events->interrupt.soft;
 	if (vcpu->arch.interrupt.pending && irqchip_in_kernel(vcpu->kvm))
 		kvm_pic_clear_isr_ack(vcpu->kvm);
+	if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
+		kvm_x86_ops->set_interrupt_shadow(vcpu,
+						  events->interrupt.shadow);
 	vcpu->arch.nmi_injected = events->nmi.injected;
 	if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
......
@@ -502,6 +502,7 @@ struct kvm_ioeventfd {
 #define KVM_CAP_HYPERV_SPIN 46
 #define KVM_CAP_PCI_SEGMENT 47
 #define KVM_CAP_PPC_PAIRED_SINGLES 48
+#define KVM_CAP_INTR_SHADOW 49
 #define KVM_CAP_X86_ROBUST_SINGLESTEP 51
 #ifdef KVM_CAP_IRQ_ROUTING
......