Commit 212c7654 authored by Pavel Fedin, committed by Christoffer Dall

KVM: arm/arm64: Clean up vgic_retire_lr() and surroundings

1. Remove the unnecessary 'irq' argument, because the IRQ number can be
   retrieved from the LR.
2. Since cff9211e
   ("arm/arm64: KVM: Fix arch timer behavior for disabled interrupts"),
   LR_STATE_PENDING is queued back by vgic_retire_lr() itself, which also
   clears vlr.state. Therefore, remove the now duplicated check, along with
   the accompanying bit manipulations, from vgic_unqueue_irqs().
3. vgic_retire_lr() is always accompanied by vgic_irq_clear_queued(). Since
   it already does more than just clear the LR, move vgic_irq_clear_queued()
   inside it.
Signed-off-by: Pavel Fedin <p.fedin@samsung.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Parent c4cd4c16
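To make the hunks below easier to follow, here is a sketch of how vgic_retire_lr() reads once the patch is applied, stitched together from the diff and the commit message; the tail of the function, which the patch does not touch, is elided.

static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu)
{
	struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);

	/* The IRQ number is read from the LR itself; no 'irq' argument needed. */
	vgic_irq_clear_queued(vcpu, vlr.irq);

	/*
	 * Transfer the pending state back to the distributor before retiring
	 * the LR, otherwise edge-triggered interrupts could be lost.
	 */
	if (vlr.state & LR_STATE_PENDING) {
		vgic_dist_irq_set_pending(vcpu, vlr.irq);
		vlr.hwirq = 0;
	}

	/* ... unchanged tail: clear vlr.state and write the LR back ... */
}

With vgic_irq_clear_queued() folded in, callers such as vgic_unqueue_irqs() and vgic_retire_disabled_irqs() only need the single vgic_retire_lr(lr_nr, vcpu) call, as the hunks below show.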
@@ -105,7 +105,7 @@
 #include "vgic.h"
 
 static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
-static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
+static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu);
 static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
 static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
 static u64 vgic_get_elrsr(struct kvm_vcpu *vcpu);
@@ -717,30 +717,14 @@ void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
 		 * interrupt then move the active state to the
 		 * distributor tracking bit.
 		 */
-		if (lr.state & LR_STATE_ACTIVE) {
+		if (lr.state & LR_STATE_ACTIVE)
 			vgic_irq_set_active(vcpu, lr.irq);
-			lr.state &= ~LR_STATE_ACTIVE;
-		}
 
 		/*
 		 * Reestablish the pending state on the distributor and the
-		 * CPU interface. It may have already been pending, but that
-		 * is fine, then we are only setting a few bits that were
-		 * already set.
+		 * CPU interface and mark the LR as free for other use.
 		 */
-		if (lr.state & LR_STATE_PENDING) {
-			vgic_dist_irq_set_pending(vcpu, lr.irq);
-			lr.state &= ~LR_STATE_PENDING;
-		}
-
-		vgic_set_lr(vcpu, i, lr);
-
-		/*
-		 * Mark the LR as free for other use.
-		 */
-		BUG_ON(lr.state & LR_STATE_MASK);
-		vgic_retire_lr(i, lr.irq, vcpu);
-		vgic_irq_clear_queued(vcpu, lr.irq);
+		vgic_retire_lr(i, vcpu);
 
 		/* Finally update the VGIC state. */
 		vgic_update_state(vcpu->kvm);
@@ -1099,16 +1083,18 @@ static inline void vgic_enable(struct kvm_vcpu *vcpu)
 	vgic_ops->enable(vcpu);
 }
 
-static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
+static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu)
 {
 	struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);
 
+	vgic_irq_clear_queued(vcpu, vlr.irq);
+
 	/*
 	 * We must transfer the pending state back to the distributor before
 	 * retiring the LR, otherwise we may loose edge-triggered interrupts.
 	 */
 	if (vlr.state & LR_STATE_PENDING) {
-		vgic_dist_irq_set_pending(vcpu, irq);
+		vgic_dist_irq_set_pending(vcpu, vlr.irq);
 		vlr.hwirq = 0;
 	}
@@ -1135,11 +1121,8 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
 	for_each_clear_bit(lr, elrsr_ptr, vgic->nr_lr) {
 		struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
 
-		if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
-			vgic_retire_lr(lr, vlr.irq, vcpu);
-			if (vgic_irq_is_queued(vcpu, vlr.irq))
-				vgic_irq_clear_queued(vcpu, vlr.irq);
-		}
+		if (!vgic_irq_is_enabled(vcpu, vlr.irq))
+			vgic_retire_lr(lr, vcpu);
 	}
 }