Commit 76616fe6 authored by Lance Roy, committed by Yang Yingliang

KVM: arm/arm64: vgic: Replace spin_is_locked() with lockdep

mainline inclusion
from mainline-v5.0-rc1
commit d4d592a6
category: feature
feature: convert assertions of spin_is_locked() into lockdep_assert_held()

-------------------------------------------------

lockdep_assert_held() is better suited to checking locking requirements,
since it only checks if the current thread holds the lock regardless of
whether someone else does. This is also a step towards possibly removing
spin_is_locked().
Signed-off-by: Lance Roy <ldr709@gmail.com>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Eric Auger <eric.auger@redhat.com>
Cc: linux-arm-kernel@lists.infradead.org
Cc: <kvmarm@lists.cs.columbia.edu>
Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
Acked-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Zenghui Yu <yuzenghui@huawei.com>
Reviewed-by: Hailiang Zhang <zhang.zhanghailiang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent df5f4d78
virt/kvm/arm/vgic/vgic.c
@@ -225,7 +225,7 @@ void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
  */
 static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
 {
-	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));
+	lockdep_assert_held(&irq->irq_lock);
 
 	/* If the interrupt is active, it must stay on the current vcpu */
 	if (irq->active)
@@ -309,7 +309,7 @@ static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 
-	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
+	lockdep_assert_held(&vgic_cpu->ap_list_lock);
 
 	list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
 }
@@ -347,7 +347,7 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
 {
 	struct kvm_vcpu *vcpu;
 
-	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));
+	lockdep_assert_held(&irq->irq_lock);
 
 retry:
 	vcpu = vgic_target_oracle(irq);
@@ -739,7 +739,7 @@ static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
 static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
 				    struct vgic_irq *irq, int lr)
 {
-	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));
+	lockdep_assert_held(&irq->irq_lock);
 
 	if (kvm_vgic_global_state.type == VGIC_V2)
 		vgic_v2_populate_lr(vcpu, irq, lr);
@@ -773,7 +773,7 @@ static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
 	*multi_sgi = false;
 
-	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
+	lockdep_assert_held(&vgic_cpu->ap_list_lock);
 
 	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
 		int w;
@@ -798,7 +798,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
 	bool multi_sgi;
 	u8 prio = 0xff;
 
-	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
+	lockdep_assert_held(&vgic_cpu->ap_list_lock);
 
 	count = compute_ap_list_depth(vcpu, &multi_sgi);
 	if (count > kvm_vgic_global_state.nr_lr || multi_sgi)
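The conversion applied at every call site above follows the same pattern. A minimal sketch contrasting the two assertion styles is shown below; the wrapper function name is illustrative only and does not come from the patch.

#include <linux/bug.h>
#include <linux/lockdep.h>
#include <linux/spinlock.h>

/*
 * Illustrative only: vgic_example_assert() is a made-up name, not a
 * function from this patch.  It contrasts the old and new assertions.
 */
static void vgic_example_assert(spinlock_t *lock)
{
        /*
         * Old style: passes whenever *any* CPU holds the lock, so it
         * cannot tell whether the caller itself acquired it, and on
         * some !SMP configurations spin_is_locked() always returns 0.
         */
        WARN_ON(!spin_is_locked(lock));

        /*
         * New style: lockdep verifies that the current context holds
         * the lock, and the check compiles away when CONFIG_LOCKDEP
         * is disabled.
         */
        lockdep_assert_held(lock);
}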