Commit e2ed4078 authored by Suravee Suthikulpanit, committed by Paolo Bonzini

kvm: i8254: Deactivate APICv when using in-kernel PIT re-injection mode.

AMD SVM AVIC accelerates EOI write and does not trap. This causes
in-kernel PIT re-injection mode to fail, since that mode relies on the
irq-ack notifier mechanism. So, APICv is activated only when the
in-kernel PIT is in discard mode, e.g. with the QEMU option:

  -global kvm-pit.lost_tick_policy=discard

Also, introduce the APICV_INHIBIT_REASON_PIT_REINJ bit to be used for this
inhibit reason.
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent f3515dc3
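
For readers unfamiliar with the APICv inhibit machinery this patch builds on: each inhibit reason is one bit in a per-VM mask, and APICv stays active only while that mask is empty. Below is a minimal sketch of that bookkeeping pattern; the type and function names are illustrative stand-ins, not the kernel's actual identifiers, while the APICV_INHIBIT_REASON_* constants come from the diff itself.

/* Illustrative sketch of the inhibit-bit pattern (not kernel code). */
#include <stdbool.h>

#define BIT(n)				(1UL << (n))

#define APICV_INHIBIT_REASON_DISABLE	0
#define APICV_INHIBIT_REASON_HYPERV	1
#define APICV_INHIBIT_REASON_NESTED	2
#define APICV_INHIBIT_REASON_IRQWIN	3
#define APICV_INHIBIT_REASON_PIT_REINJ	4

struct vm_state {
	unsigned long apicv_inhibit_reasons;	/* one bit per pending reason */
};

/* APICv may be used only while no inhibit reason is pending. */
static bool apicv_active(const struct vm_state *vm)
{
	return vm->apicv_inhibit_reasons == 0;
}

/* activate == false sets the reason (inhibits APICv); true clears it. */
static void request_apicv_update(struct vm_state *vm, bool activate,
				 unsigned long reason)
{
	if (activate)
		vm->apicv_inhibit_reasons &= ~BIT(reason);
	else
		vm->apicv_inhibit_reasons |= BIT(reason);
	/* A real implementation would also propagate the change to vCPUs. */
}

With this patch, entering PIT re-inject mode sets the PIT_REINJ bit and thereby deactivates APICv; switching to the discard policy clears it again.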
@@ -879,6 +879,7 @@ enum kvm_irqchip_mode {
 #define APICV_INHIBIT_REASON_HYPERV 1
 #define APICV_INHIBIT_REASON_NESTED 2
 #define APICV_INHIBIT_REASON_IRQWIN 3
+#define APICV_INHIBIT_REASON_PIT_REINJ 4

 struct kvm_arch {
 	unsigned long n_used_mmu_pages;
...
@@ -295,12 +295,24 @@ void kvm_pit_set_reinject(struct kvm_pit *pit, bool reinject)
 	if (atomic_read(&ps->reinject) == reinject)
 		return;

+	/*
+	 * AMD SVM AVIC accelerates EOI write and does not trap.
+	 * This cause in-kernel PIT re-inject mode to fail
+	 * since it checks ps->irq_ack before kvm_set_irq()
+	 * and relies on the ack notifier to timely queue
+	 * the pt->worker work iterm and reinject the missed tick.
+	 * So, deactivate APICv when PIT is in reinject mode.
+	 */
 	if (reinject) {
+		kvm_request_apicv_update(kvm, false,
+					 APICV_INHIBIT_REASON_PIT_REINJ);
 		/* The initial state is preserved while ps->reinject == 0. */
 		kvm_pit_reset_reinject(pit);
 		kvm_register_irq_ack_notifier(kvm, &ps->irq_ack_notifier);
 		kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
 	} else {
+		kvm_request_apicv_update(kvm, true,
+					 APICV_INHIBIT_REASON_PIT_REINJ);
 		kvm_unregister_irq_ack_notifier(kvm, &ps->irq_ack_notifier);
 		kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
 	}
...
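
The comment in this hunk refers to the irq-ack notifier path: the in-kernel PIT registers a notifier that runs when the guest EOIs the timer interrupt, and only after that acknowledgement may the re-inject logic queue the worker that delivers a missed tick. The sketch below shows that dependency in simplified form; kvm_register_irq_ack_notifier() is the real KVM interface used above, but the struct and helper names here are illustrative stand-ins. With AVIC, the accelerated EOI write never traps, so the callback never runs, the acknowledgement is never observed, and the PIT stops injecting further ticks.

/* Illustrative sketch of the ack-notifier dependency (not kernel code). */
#include <stdbool.h>

struct pit_state;

struct irq_ack_notifier {
	unsigned int gsi;			/* interrupt line being watched */
	struct pit_state *owner;		/* back-pointer for the callback */
	void (*irq_acked)(struct irq_ack_notifier *n);
};

struct pit_state {
	bool irq_ack;				/* was the last injected tick EOI'd? */
	struct irq_ack_notifier notifier;
};

/* Runs only if the guest's EOI write actually traps into the hypervisor. */
static void pit_irq_acked(struct irq_ack_notifier *n)
{
	n->owner->irq_ack = true;		/* the next tick may now be injected */
}

/*
 * Re-inject mode injects a new tick only after the previous one was acked;
 * if EOIs are hardware-accelerated and never trap, this stays false forever.
 */
static bool can_inject_next_tick(const struct pit_state *ps)
{
	return ps->irq_ack;
}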
@@ -1739,7 +1739,13 @@ static int avic_update_access_page(struct kvm *kvm, bool activate)
 	int ret = 0;

 	mutex_lock(&kvm->slots_lock);
-	if (kvm->arch.apic_access_page_done == activate)
+	/*
+	 * During kvm_destroy_vm(), kvm_pit_set_reinject() could trigger
+	 * APICv mode change, which update APIC_ACCESS_PAGE_PRIVATE_MEMSLOT
+	 * memory region. So, we need to ensure that kvm->mm == current->mm.
+	 */
+	if ((kvm->arch.apic_access_page_done == activate) ||
+	    (kvm->mm != current->mm))
 		goto out;

 	ret = __x86_set_memory_region(kvm,
@@ -7353,7 +7359,8 @@ static bool svm_check_apicv_inhibit_reasons(ulong bit)
 	ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
 			  BIT(APICV_INHIBIT_REASON_HYPERV) |
 			  BIT(APICV_INHIBIT_REASON_NESTED) |
-			  BIT(APICV_INHIBIT_REASON_IRQWIN);
+			  BIT(APICV_INHIBIT_REASON_IRQWIN) |
+			  BIT(APICV_INHIBIT_REASON_PIT_REINJ);

 	return supported & BIT(bit);
 }
...
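
The second svm.c hunk matters because the inhibit request path checks the vendor's supported-reason mask before acting; a reason the vendor does not advertise is simply ignored. A minimal sketch of that consultation follows, again with made-up names apart from the APICV_INHIBIT_REASON_* constants. Without PIT_REINJ in the SVM mask, the PIT's request in the i8254.c hunk would have been a no-op.

/* Illustrative sketch of the vendor supported-mask check (not kernel code). */
#include <stdbool.h>

#define BIT(n)				(1UL << (n))

#define APICV_INHIBIT_REASON_IRQWIN	3
#define APICV_INHIBIT_REASON_PIT_REINJ	4

/* Stand-in for the per-vendor callback shown in the hunk above. */
static bool vendor_check_apicv_inhibit_reasons(unsigned long bit)
{
	unsigned long supported = BIT(APICV_INHIBIT_REASON_IRQWIN) |
				  BIT(APICV_INHIBIT_REASON_PIT_REINJ);

	return supported & BIT(bit);
}

/* The common request path bails out when the vendor does not know the reason. */
static bool apicv_update_allowed(unsigned long reason)
{
	return vendor_check_apicv_inhibit_reasons(reason);
}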