Commit 58c2dde1 authored by Gleb Natapov, committed by Avi Kivity

KVM: APIC: get rid of deliver_bitmask

Deliver interrupt during destination matching loop.
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Acked-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Parent e1035715
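The shape of the change, in isolation: the old code first built a deliver_bitmask of matching vCPUs and then walked that bitmask to inject the interrupt, while the new kvm_irq_delivery_to_apic() injects as soon as a destination matches (lowest-priority delivery just tracks the single best candidate and injects once at the end). The stand-alone user-space sketch below contrasts the two loop structures; struct vcpu, matches_dest() and set_irq() are simplified stand-ins invented for illustration, not kernel APIs.

/* Illustrative sketch only -- simplified stand-ins, not the kernel code. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_VCPUS 4

struct vcpu { int id; bool present; };
static struct vcpu vcpus[MAX_VCPUS] = {
	{0, true}, {1, true}, {2, false}, {3, true}
};

static bool matches_dest(struct vcpu *v, int dest_id)
{
	return dest_id == 0xff || v->id == dest_id;	/* broadcast or exact id */
}

static int set_irq(struct vcpu *v, int vector)
{
	printf("deliver vector 0x%x to vcpu %d\n", vector, v->id);
	return 1;
}

/* Old shape: two passes -- collect a bitmask, then walk it to deliver. */
static int deliver_with_bitmask(int dest_id, int vector)
{
	unsigned long mask = 0;
	int i, r = -1;

	for (i = 0; i < MAX_VCPUS; i++)
		if (vcpus[i].present && matches_dest(&vcpus[i], dest_id))
			mask |= 1UL << i;
	for (i = 0; i < MAX_VCPUS; i++)
		if (mask & (1UL << i))
			r = (r < 0 ? 0 : r) + set_irq(&vcpus[i], vector);
	return r;
}

/* New shape: deliver during the destination-matching loop itself. */
static int deliver_in_match_loop(int dest_id, int vector)
{
	int i, r = -1;

	for (i = 0; i < MAX_VCPUS; i++) {
		if (!vcpus[i].present || !matches_dest(&vcpus[i], dest_id))
			continue;
		if (r < 0)
			r = 0;
		r += set_irq(&vcpus[i], vector);
	}
	return r;
}

int main(void)
{
	deliver_with_bitmask(0xff, 0x31);
	deliver_in_match_loop(0xff, 0x31);
	return 0;
}

Both variants keep the return convention the patch uses: -1 when no destination matched, otherwise a count of successful injections.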
@@ -283,6 +283,18 @@ static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 }
 
+static int __apic_accept_irq(struct kvm_vcpu *vcpu, uint64_t vector)
+{
+	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
+
+	if (!test_and_set_bit(vector, &vpd->irr[0])) {
+		vcpu->arch.irq_new_pending = 1;
+		kvm_vcpu_kick(vcpu);
+		return 1;
+	}
+	return 0;
+}
+
 /*
  * offset: address offset to IPI space.
  * value: deliver value.
@@ -292,20 +304,20 @@ static void vcpu_deliver_ipi(struct kvm_vcpu *vcpu, uint64_t dm,
 {
 	switch (dm) {
 	case SAPIC_FIXED:
-		kvm_apic_set_irq(vcpu, vector, dm, 0);
 		break;
 	case SAPIC_NMI:
-		kvm_apic_set_irq(vcpu, 2, dm, 0);
+		vector = 2;
 		break;
 	case SAPIC_EXTINT:
-		kvm_apic_set_irq(vcpu, 0, dm, 0);
+		vector = 0;
 		break;
 	case SAPIC_INIT:
 	case SAPIC_PMI:
 	default:
 		printk(KERN_ERR"kvm: Unimplemented Deliver reserved IPI!\n");
-		break;
+		return;
 	}
+	__apic_accept_irq(vcpu, vector);
 }
 
 static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
@@ -1813,17 +1825,9 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
 	put_cpu();
 }
 
-int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 dmode, u8 trig)
+int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq)
 {
-	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
-
-	if (!test_and_set_bit(vec, &vpd->irr[0])) {
-		vcpu->arch.irq_new_pending = 1;
-		kvm_vcpu_kick(vcpu);
-		return 1;
-	}
-	return 0;
+	return __apic_accept_irq(vcpu, irq->vector);
 }
 
 int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
@@ -1844,6 +1848,7 @@ int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
 int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
 		int short_hand, int dest, int dest_mode)
 {
+	struct kvm_lapic *target = vcpu->arch.apic;
 	return (dest_mode == 0) ?
 		kvm_apic_match_physical_addr(target, dest) :
 		kvm_apic_match_logical_addr(target, dest);
...
@@ -23,7 +23,7 @@ int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
 int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
 		int short_hand, int dest, int dest_mode);
 int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
-bool kvm_apic_present(struct kvm_vcpu *vcpu);
-int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 dmode, u8 trig);
+int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq);
+#define kvm_apic_present(x) (true)
 
 #endif
@@ -199,27 +199,12 @@ EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);
 static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 			     int vector, int level, int trig_mode);
 
-int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 dmode, u8 trig)
+int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq)
 {
 	struct kvm_lapic *apic = vcpu->arch.apic;
-	int lapic_dmode;
 
-	switch (dmode) {
-	case IOAPIC_LOWEST_PRIORITY:
-		lapic_dmode = APIC_DM_LOWEST;
-		break;
-	case IOAPIC_FIXED:
-		lapic_dmode = APIC_DM_FIXED;
-		break;
-	case IOAPIC_NMI:
-		lapic_dmode = APIC_DM_NMI;
-		break;
-	default:
-		printk(KERN_DEBUG"Ignoring delivery mode %d\n", dmode);
-		return 0;
-		break;
-	}
-	return __apic_accept_irq(apic, lapic_dmode, vec, 1, trig);
+	return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
+			irq->level, irq->trig_mode);
 }
 
 static inline int apic_find_highest_isr(struct kvm_lapic *apic)
@@ -447,36 +432,24 @@ static void apic_send_ipi(struct kvm_lapic *apic)
 {
 	u32 icr_low = apic_get_reg(apic, APIC_ICR);
 	u32 icr_high = apic_get_reg(apic, APIC_ICR2);
+	struct kvm_lapic_irq irq;
 
-	unsigned int dest = GET_APIC_DEST_FIELD(icr_high);
-	unsigned int short_hand = icr_low & APIC_SHORT_MASK;
-	unsigned int trig_mode = icr_low & APIC_INT_LEVELTRIG;
-	unsigned int level = icr_low & APIC_INT_ASSERT;
-	unsigned int dest_mode = icr_low & APIC_DEST_MASK;
-	unsigned int delivery_mode = icr_low & APIC_MODE_MASK;
-	unsigned int vector = icr_low & APIC_VECTOR_MASK;
-	DECLARE_BITMAP(deliver_bitmask, KVM_MAX_VCPUS);
-	int i;
+	irq.vector = icr_low & APIC_VECTOR_MASK;
+	irq.delivery_mode = icr_low & APIC_MODE_MASK;
+	irq.dest_mode = icr_low & APIC_DEST_MASK;
+	irq.level = icr_low & APIC_INT_ASSERT;
+	irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
+	irq.shorthand = icr_low & APIC_SHORT_MASK;
+	irq.dest_id = GET_APIC_DEST_FIELD(icr_high);
 
 	apic_debug("icr_high 0x%x, icr_low 0x%x, "
 		   "short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
 		   "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x\n",
-		   icr_high, icr_low, short_hand, dest,
-		   trig_mode, level, dest_mode, delivery_mode, vector);
+		   icr_high, icr_low, irq.shorthand, irq.dest,
+		   irq.trig_mode, irq.level, irq.dest_mode, irq.delivery_mode,
+		   irq.vector);
 
-	kvm_get_intr_delivery_bitmask(apic->vcpu->kvm, apic, dest, dest_mode,
-			delivery_mode == APIC_DM_LOWEST, short_hand,
-			deliver_bitmask);
-
-	while ((i = find_first_bit(deliver_bitmask, KVM_MAX_VCPUS))
-			< KVM_MAX_VCPUS) {
-		struct kvm_vcpu *vcpu = apic->vcpu->kvm->vcpus[i];
-		__clear_bit(i, deliver_bitmask);
-		if (vcpu)
-			__apic_accept_irq(vcpu->arch.apic, delivery_mode,
-					vector, level, trig_mode);
-	}
+	kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq);
 }
 
 static u32 apic_get_tmcct(struct kvm_lapic *apic)
...
@@ -31,14 +31,13 @@ u64 kvm_lapic_get_base(struct kvm_vcpu *vcpu);
 int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
 int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
-int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 dmode, u8 trig);
+int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq);
 
 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu);
 void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data);
 void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu);
 int kvm_lapic_enabled(struct kvm_vcpu *vcpu);
 bool kvm_apic_present(struct kvm_vcpu *vcpu);
-bool kvm_lapic_present(struct kvm_vcpu *vcpu);
 int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
 
 void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
...
@@ -57,4 +57,14 @@ union kvm_ioapic_redirect_entry {
 	} fields;
 };
 
+struct kvm_lapic_irq {
+	u32 vector;
+	u32 delivery_mode;
+	u32 dest_mode;
+	u32 level;
+	u32 trig_mode;
+	u32 shorthand;
+	u32 dest_id;
+};
+
 #endif /* __KVM_TYPES_H__ */
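For orientation, here is a small stand-alone sketch of how the new structure is filled from an ICR write, mirroring the field assignments apic_send_ipi() makes in the hunk above. The APIC_* mask values and GET_APIC_DEST_FIELD() are reproduced from the x86 apicdef.h definitions only to make the example compilable; treat them as assumptions of this sketch, not as part of the patch.

/* Illustration only: decode an ICR write into struct kvm_lapic_irq. */
#include <stdint.h>
#include <stdio.h>

#define APIC_VECTOR_MASK   0x000FFu
#define APIC_MODE_MASK     0x00700u
#define APIC_DEST_MASK     0x00800u
#define APIC_INT_ASSERT    0x04000u
#define APIC_INT_LEVELTRIG 0x08000u
#define APIC_SHORT_MASK    0xC0000u
#define GET_APIC_DEST_FIELD(x) (((x) >> 24) & 0xFFu)

struct kvm_lapic_irq {
	uint32_t vector;
	uint32_t delivery_mode;
	uint32_t dest_mode;
	uint32_t level;
	uint32_t trig_mode;
	uint32_t shorthand;
	uint32_t dest_id;
};

int main(void)
{
	/* Example ICR: fixed delivery, asserted, vector 0x31, physical dest 2. */
	uint32_t icr_low = 0x00004031, icr_high = 0x02000000;
	struct kvm_lapic_irq irq;

	irq.vector = icr_low & APIC_VECTOR_MASK;
	irq.delivery_mode = icr_low & APIC_MODE_MASK;
	irq.dest_mode = icr_low & APIC_DEST_MASK;
	irq.level = icr_low & APIC_INT_ASSERT;
	irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
	irq.shorthand = icr_low & APIC_SHORT_MASK;
	irq.dest_id = GET_APIC_DEST_FIELD(icr_high);

	printf("vector=0x%x dest_id=%u level=%u\n",
	       (unsigned)irq.vector, (unsigned)irq.dest_id,
	       (unsigned)(irq.level != 0));
	return 0;
}

Note that delivery_mode keeps the raw masked ICR bits (so fixed delivery stays 0x000), which matches how the ioapic_deliver() hunk below shifts its delivery_mode field left by 8 before handing it to kvm_irq_delivery_to_apic().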
@@ -142,58 +142,33 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 	}
 }
 
-int ioapic_deliver_entry(struct kvm *kvm, union kvm_ioapic_redirect_entry *e)
-{
-	DECLARE_BITMAP(deliver_bitmask, KVM_MAX_VCPUS);
-	int i, r = -1;
-
-	kvm_get_intr_delivery_bitmask(kvm, NULL, e->fields.dest_id,
-			e->fields.dest_mode,
-			e->fields.delivery_mode == IOAPIC_LOWEST_PRIORITY,
-			0, deliver_bitmask);
-
-	if (find_first_bit(deliver_bitmask, KVM_MAX_VCPUS) >= KVM_MAX_VCPUS) {
-		ioapic_debug("no target on destination\n");
-		return r;
-	}
-
-	while ((i = find_first_bit(deliver_bitmask, KVM_MAX_VCPUS))
-			< KVM_MAX_VCPUS) {
-		struct kvm_vcpu *vcpu = kvm->vcpus[i];
-		__clear_bit(i, deliver_bitmask);
-		if (vcpu) {
-			if (r < 0)
-				r = 0;
-			r += kvm_apic_set_irq(vcpu, e->fields.vector,
-					e->fields.delivery_mode,
-					e->fields.trig_mode);
-		} else
-			ioapic_debug("null destination vcpu: "
-					"mask=%x vector=%x delivery_mode=%x\n",
-					e->fields.deliver_bitmask,
-					e->fields.vector, e->fields.delivery_mode);
-	}
-	return r;
-}
-
 static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 {
-	union kvm_ioapic_redirect_entry entry = ioapic->redirtbl[irq];
+	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
+	struct kvm_lapic_irq irqe;
 
 	ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
 		     "vector=%x trig_mode=%x\n",
-		     entry.fields.dest, entry.fields.dest_mode,
-		     entry.fields.delivery_mode, entry.fields.vector,
-		     entry.fields.trig_mode);
+		     entry->fields.dest, entry->fields.dest_mode,
+		     entry->fields.delivery_mode, entry->fields.vector,
+		     entry->fields.trig_mode);
+
+	irqe.dest_id = entry->fields.dest_id;
+	irqe.vector = entry->fields.vector;
+	irqe.dest_mode = entry->fields.dest_mode;
+	irqe.trig_mode = entry->fields.trig_mode;
+	irqe.delivery_mode = entry->fields.delivery_mode << 8;
+	irqe.level = 1;
+	irqe.shorthand = 0;
 
 #ifdef CONFIG_X86
 	/* Always delivery PIT interrupt to vcpu 0 */
 	if (irq == 0) {
-		entry.fields.dest_mode = 0;	/* Physical mode. */
-		entry.fields.dest_id = ioapic->kvm->vcpus[0]->vcpu_id;
+		irqe.dest_mode = 0;	/* Physical mode. */
+		irqe.dest_id = ioapic->kvm->vcpus[0]->vcpu_id;
 	}
 #endif
-	return ioapic_deliver_entry(ioapic->kvm, &entry);
+	return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe);
 }
 
 int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
...
@@ -71,8 +71,6 @@ void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode);
 int kvm_ioapic_init(struct kvm *kvm);
 int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level);
 void kvm_ioapic_reset(struct kvm_ioapic *ioapic);
-void kvm_get_intr_delivery_bitmask(struct kvm *kvm, struct kvm_lapic *src,
-		int dest_id, int dest_mode, bool low_prio, int short_hand,
-		unsigned long *deliver_bitmask);
-int ioapic_deliver_entry(struct kvm *kvm, union kvm_ioapic_redirect_entry *e);
+int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
+		struct kvm_lapic_irq *irq);
 
 #endif
@@ -22,6 +22,9 @@
 #include <linux/kvm_host.h>
 
 #include <asm/msidef.h>
+#ifdef CONFIG_IA64
+#include <asm/iosapic.h>
+#endif
 
 #include "irq.h"
@@ -43,61 +46,71 @@ static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
 	return kvm_ioapic_set_irq(kvm->arch.vioapic, e->irqchip.pin, level);
 }
 
-void kvm_get_intr_delivery_bitmask(struct kvm *kvm, struct kvm_lapic *src,
-		int dest_id, int dest_mode, bool low_prio, int short_hand,
-		unsigned long *deliver_bitmask)
+inline static bool kvm_is_dm_lowest_prio(struct kvm_lapic_irq *irq)
 {
-	int i, lowest = -1;
-	struct kvm_vcpu *vcpu;
+#ifdef CONFIG_IA64
+	return irq->delivery_mode ==
+		(IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT);
+#else
+	return irq->delivery_mode == APIC_DM_LOWEST;
+#endif
+}
 
-	if (dest_mode == 0 && dest_id == 0xff && low_prio)
+int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
+		struct kvm_lapic_irq *irq)
+{
+	int i, r = -1;
+	struct kvm_vcpu *vcpu, *lowest = NULL;
+
+	if (irq->dest_mode == 0 && irq->dest_id == 0xff &&
+			kvm_is_dm_lowest_prio(irq))
 		printk(KERN_INFO "kvm: apic: phys broadcast and lowest prio\n");
 
-	bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);
 	for (i = 0; i < KVM_MAX_VCPUS; i++) {
 		vcpu = kvm->vcpus[i];
 
 		if (!vcpu || !kvm_apic_present(vcpu))
 			continue;
 
-		if (!kvm_apic_match_dest(vcpu, src, short_hand, dest_id,
-					dest_mode))
+		if (!kvm_apic_match_dest(vcpu, src, irq->shorthand,
+					irq->dest_id, irq->dest_mode))
 			continue;
 
-		if (!low_prio) {
-			__set_bit(i, deliver_bitmask);
+		if (!kvm_is_dm_lowest_prio(irq)) {
+			if (r < 0)
+				r = 0;
+			r += kvm_apic_set_irq(vcpu, irq);
 		} else {
-			if (lowest < 0)
-				lowest = i;
-			if (kvm_apic_compare_prio(vcpu, kvm->vcpus[lowest]) < 0)
-				lowest = i;
+			if (!lowest)
+				lowest = vcpu;
+			else if (kvm_apic_compare_prio(vcpu, lowest) < 0)
+				lowest = vcpu;
 		}
 	}
 
-	if (lowest != -1)
-		__set_bit(lowest, deliver_bitmask);
+	if (lowest)
+		r = kvm_apic_set_irq(lowest, irq);
+
+	return r;
 }
 
 static int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
 		       struct kvm *kvm, int level)
 {
-	union kvm_ioapic_redirect_entry entry;
+	struct kvm_lapic_irq irq;
 
-	entry.bits = 0;
-	entry.fields.dest_id = (e->msi.address_lo &
-			MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
-	entry.fields.vector = (e->msi.data &
-			MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
-	entry.fields.dest_mode = test_bit(MSI_ADDR_DEST_MODE_SHIFT,
-			(unsigned long *)&e->msi.address_lo);
-	entry.fields.trig_mode = test_bit(MSI_DATA_TRIGGER_SHIFT,
-			(unsigned long *)&e->msi.data);
-	entry.fields.delivery_mode = test_bit(
-			MSI_DATA_DELIVERY_MODE_SHIFT,
-			(unsigned long *)&e->msi.data);
+	irq.dest_id = (e->msi.address_lo &
+			MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
+	irq.vector = (e->msi.data &
+			MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
+	irq.dest_mode = (1 << MSI_ADDR_DEST_MODE_SHIFT) & e->msi.address_lo;
+	irq.trig_mode = (1 << MSI_DATA_TRIGGER_SHIFT) & e->msi.data;
+	irq.delivery_mode = e->msi.data & 0x700;
+	irq.level = 1;
+	irq.shorthand = 0;
 
 	/* TODO Deal with RH bit of MSI message address */
-	return ioapic_deliver_entry(kvm, &entry);
+	return kvm_irq_delivery_to_apic(kvm, NULL, &irq);
 }
 
 /* This should be called with the kvm->lock mutex held
...