提交 1b3618b6 编写于 作者: Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
 "Except for the preempt notifiers fix, these are all small bugfixes
  that could have been waited for -rc2.  Sending them now since I was
  taking care of Peter's patch anyway"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  kvm: add hyper-v crash msrs values
  KVM: x86: remove data variable from kvm_get_msr_common
  KVM: s390: virtio-ccw: don't overwrite config space values
  KVM: x86: keep track of LVT0 changes under APICv
  KVM: x86: properly restore LVT0
  KVM: x86: make vapics_in_nmi_mode atomic
  sched, preempt_notifier: separate notifier registration from static_key inc/dec
@@ -607,7 +607,7 @@ struct kvm_arch {
 	struct kvm_pic *vpic;
 	struct kvm_ioapic *vioapic;
 	struct kvm_pit *vpit;
-	int vapics_in_nmi_mode;
+	atomic_t vapics_in_nmi_mode;
 	struct mutex apic_map_lock;
 	struct kvm_apic_map *apic_map;
@@ -199,6 +199,17 @@
 #define HV_X64_MSR_STIMER3_CONFIG		0x400000B6
 #define HV_X64_MSR_STIMER3_COUNT		0x400000B7
 
+/* Hyper-V guest crash notification MSR's */
+#define HV_X64_MSR_CRASH_P0			0x40000100
+#define HV_X64_MSR_CRASH_P1			0x40000101
+#define HV_X64_MSR_CRASH_P2			0x40000102
+#define HV_X64_MSR_CRASH_P3			0x40000103
+#define HV_X64_MSR_CRASH_P4			0x40000104
+#define HV_X64_MSR_CRASH_CTL			0x40000105
+#define HV_X64_MSR_CRASH_CTL_NOTIFY		(1ULL << 63)
+#define HV_X64_MSR_CRASH_PARAMS		\
+		(1 + (HV_X64_MSR_CRASH_P4 - HV_X64_MSR_CRASH_P0))
+
 #define HV_X64_MSR_HYPERCALL_ENABLE		0x00000001
 #define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT	12
 #define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK	\
@@ -305,7 +305,7 @@ static void pit_do_work(struct kthread_work *work)
 	 * LVT0 to NMI delivery. Other PIC interrupts are just sent to
 	 * VCPU0, and only if its LVT0 is in EXTINT mode.
 	 */
-	if (kvm->arch.vapics_in_nmi_mode > 0)
+	if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0)
 		kvm_for_each_vcpu(i, vcpu, kvm)
 			kvm_apic_nmi_wd_deliver(vcpu);
 }
@@ -1257,16 +1257,17 @@ static void start_apic_timer(struct kvm_lapic *apic)
 
 static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
 {
-	int nmi_wd_enabled = apic_lvt_nmi_mode(kvm_apic_get_reg(apic, APIC_LVT0));
+	bool lvt0_in_nmi_mode = apic_lvt_nmi_mode(lvt0_val);
 
-	if (apic_lvt_nmi_mode(lvt0_val)) {
-		if (!nmi_wd_enabled) {
+	if (apic->lvt0_in_nmi_mode != lvt0_in_nmi_mode) {
+		apic->lvt0_in_nmi_mode = lvt0_in_nmi_mode;
+		if (lvt0_in_nmi_mode) {
 			apic_debug("Receive NMI setting on APIC_LVT0 "
 				   "for cpu %d\n", apic->vcpu->vcpu_id);
-			apic->vcpu->kvm->arch.vapics_in_nmi_mode++;
-		}
-	} else if (nmi_wd_enabled)
-		apic->vcpu->kvm->arch.vapics_in_nmi_mode--;
+			atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
+		} else
+			atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
+	}
 }
@@ -1597,6 +1598,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 	if (!(vcpu->kvm->arch.disabled_quirks & KVM_QUIRK_LINT0_REENABLED))
 		apic_set_reg(apic, APIC_LVT0,
 			     SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
+	apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0));
 
 	apic_set_reg(apic, APIC_DFR, 0xffffffffU);
 	apic_set_spiv(apic, 0xff);
@@ -1822,6 +1824,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
 	apic_update_ppr(apic);
 	hrtimer_cancel(&apic->lapic_timer.timer);
 	apic_update_lvtt(apic);
+	apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0));
 	update_divide_count(apic);
 	start_apic_timer(apic);
 	apic->irr_pending = true;
@@ -26,6 +26,7 @@ struct kvm_lapic {
 	struct kvm_vcpu *vcpu;
 	bool sw_enabled;
 	bool irr_pending;
+	bool lvt0_in_nmi_mode;
 	/* Number of bits set in ISR. */
 	s16 isr_count;
 	/* The highest vector set in ISR; if -1 - invalid, must scan ISR. */
@@ -2379,8 +2379,6 @@ static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 
 int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
-	u64 data;
-
 	switch (msr_info->index) {
 	case MSR_IA32_PLATFORM_ID:
 	case MSR_IA32_EBL_CR_POWERON:
@@ -2453,7 +2451,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		/* TSC increment by tick */
 		msr_info->data = 1000ULL;
 		/* CPU multiplier */
-		data |= (((uint64_t)4ULL) << 40);
+		msr_info->data |= (((uint64_t)4ULL) << 40);
 		break;
 	case MSR_EFER:
 		msr_info->data = vcpu->arch.efer;
@@ -65,6 +65,7 @@ struct virtio_ccw_device {
 	bool is_thinint;
 	bool going_away;
 	bool device_lost;
+	unsigned int config_ready;
 	void *airq_info;
 };
@@ -833,8 +834,11 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
 	if (ret)
 		goto out_free;
 
-	memcpy(vcdev->config, config_area, sizeof(vcdev->config));
-	memcpy(buf, &vcdev->config[offset], len);
+	memcpy(vcdev->config, config_area, offset + len);
+	if (buf)
+		memcpy(buf, &vcdev->config[offset], len);
+	if (vcdev->config_ready < offset + len)
+		vcdev->config_ready = offset + len;
 
 out_free:
 	kfree(config_area);
@@ -857,6 +861,9 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
 	if (!config_area)
 		goto out_free;
 
+	/* Make sure we don't overwrite fields. */
+	if (vcdev->config_ready < offset)
+		virtio_ccw_get_config(vdev, 0, NULL, offset);
 	memcpy(&vcdev->config[offset], buf, len);
 	/* Write the config area to the host. */
 	memcpy(config_area, vcdev->config, sizeof(vcdev->config));
@@ -293,6 +293,8 @@ struct preempt_notifier {
 	struct preempt_ops *ops;
 };
 
+void preempt_notifier_inc(void);
+void preempt_notifier_dec(void);
 void preempt_notifier_register(struct preempt_notifier *notifier);
 void preempt_notifier_unregister(struct preempt_notifier *notifier);
@@ -2320,13 +2320,27 @@ void wake_up_new_task(struct task_struct *p)
 
 static struct static_key preempt_notifier_key = STATIC_KEY_INIT_FALSE;
 
+void preempt_notifier_inc(void)
+{
+	static_key_slow_inc(&preempt_notifier_key);
+}
+EXPORT_SYMBOL_GPL(preempt_notifier_inc);
+
+void preempt_notifier_dec(void)
+{
+	static_key_slow_dec(&preempt_notifier_key);
+}
+EXPORT_SYMBOL_GPL(preempt_notifier_dec);
+
 /**
  * preempt_notifier_register - tell me when current is being preempted & rescheduled
  * @notifier: notifier struct to register
  */
 void preempt_notifier_register(struct preempt_notifier *notifier)
 {
-	static_key_slow_inc(&preempt_notifier_key);
+	if (!static_key_false(&preempt_notifier_key))
+		WARN(1, "registering preempt_notifier while notifiers disabled\n");
+
 	hlist_add_head(&notifier->link, &current->preempt_notifiers);
 }
 EXPORT_SYMBOL_GPL(preempt_notifier_register);
@@ -2340,7 +2354,6 @@ EXPORT_SYMBOL_GPL(preempt_notifier_register);
 void preempt_notifier_unregister(struct preempt_notifier *notifier)
 {
 	hlist_del(&notifier->link);
-	static_key_slow_dec(&preempt_notifier_key);
 }
 EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
@@ -553,6 +553,8 @@ static struct kvm *kvm_create_vm(unsigned long type)
 	list_add(&kvm->vm_list, &vm_list);
 	spin_unlock(&kvm_lock);
 
+	preempt_notifier_inc();
+
 	return kvm;
 
 out_err:
@@ -620,6 +622,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
 	cleanup_srcu_struct(&kvm->irq_srcu);
 	cleanup_srcu_struct(&kvm->srcu);
 	kvm_arch_free_vm(kvm);
+	preempt_notifier_dec();
 	hardware_disable_all();
 	mmdrop(mm);
 }
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册