diff --git a/arch/arm/include/asm/mcs_spinlock.h b/arch/arm/include/asm/mcs_spinlock.h
index 529d2cf4d06f4adf88170ca6c84f3e39d3305188..ae6d763477f4f115045b512784bb81c43387d9bc 100644
--- a/arch/arm/include/asm/mcs_spinlock.h
+++ b/arch/arm/include/asm/mcs_spinlock.h
@@ -14,9 +14,9 @@ do {									\
 		wfe();							\
 } while (0)								\
 
-#define arch_mcs_spin_unlock_contended(lock)				\
+#define arch_mcs_spin_unlock_contended(lock, val)			\
 do {									\
-	smp_store_release(lock, 1);					\
+	smp_store_release(lock, (val));					\
 	dsb_sev();							\
 } while (0)
 
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
index 5e10153b4d3c92b149a72eaae92e6bafacfef161..bc6d3244e1afa3cf118bf2f392b1d6689d42b29f 100644
--- a/kernel/locking/mcs_spinlock.h
+++ b/kernel/locking/mcs_spinlock.h
@@ -41,8 +41,8 @@ do {									\
  * operations in the critical section has been completed before
  * unlocking.
  */
-#define arch_mcs_spin_unlock_contended(l)				\
-	smp_store_release((l), 1)
+#define arch_mcs_spin_unlock_contended(l, val)				\
+	smp_store_release((l), (val))
 #endif
 
 /*
@@ -115,7 +115,7 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 	}
 
 	/* Pass lock to next waiter. */
-	arch_mcs_spin_unlock_contended(&next->locked);
+	arch_mcs_spin_unlock_contended(&next->locked, 1);
 }
 
 #endif /* __LINUX_MCS_SPINLOCK_H */
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index b186ef2dab254baaf9c3cd3be604f7806f02c27f..12632f2c34db704e7cd3df748868ee242a966cc8 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -543,7 +543,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	if (!next)
 		next = smp_cond_load_relaxed(&node->next, (VAL));
 
-	arch_mcs_spin_unlock_contended(&next->locked);
+	arch_mcs_spin_unlock_contended(&next->locked, 1);
 	pv_kick_node(lock, next);
 
 release:
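
As a rough illustration of the hand-off this patch generalizes (a standalone user-space sketch, not kernel code: C11 atomics stand in for smp_store_release()/smp_load_acquire(), and mcs_node, wait_for_lock and pass_lock are hypothetical names), the waiter acquire-spins on its node's locked field until it reads non-zero, and the unlocker can now release-store an arbitrary non-zero value instead of a hard-coded 1:

/*
 * Minimal sketch of the MCS lock hand-off after this change.
 * C11 atomics model the kernel's smp_*() barriers; the kernel
 * code would additionally cpu_relax()/wfe() in the spin loop.
 */
#include <stdatomic.h>
#include <stdio.h>

struct mcs_node {
	atomic_int locked;	/* 0: keep waiting, non-zero: lock granted */
};

/* Mirrors arch_mcs_spin_lock_contended(): acquire-spin until non-zero. */
static int wait_for_lock(struct mcs_node *node)
{
	int v;

	while (!(v = atomic_load_explicit(&node->locked,
					  memory_order_acquire)))
		;
	return v;	/* whatever value the unlocker stored */
}

/* Mirrors arch_mcs_spin_unlock_contended(lock, val): release-store val. */
static void pass_lock(struct mcs_node *next, int val)
{
	atomic_store_explicit(&next->locked, val, memory_order_release);
}

int main(void)
{
	struct mcs_node next = { .locked = 0 };

	pass_lock(&next, 1);	/* the in-tree callers still pass 1 */
	printf("granted with val=%d\n", wait_for_lock(&next));
	return 0;
}

Note that both in-tree callers touched here (mcs_spin_unlock() and queued_spin_lock_slowpath()) keep passing 1, so the patch changes no behaviour by itself; the extra argument only allows future callers to encode more state in the value handed to the next waiter.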