Commit 726328d9 authored by Peter Zijlstra, committed by Ingo Molnar

locking/spinlock, arch: Update and fix spin_unlock_wait() implementations

This patch updates/fixes all spin_unlock_wait() implementations.

The update is in semantics; where it previously was only a control
dependency, we now upgrade to a full load-acquire to match the
store-release from the spin_unlock() we waited on. This ensures that
when spin_unlock_wait() returns, we're guaranteed to observe the full
critical section we waited on.
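
For the test-and-set style locks this boils down to the following shape
(mirroring the alpha hunk below; other architectures differ only in the
lock field name and the unlocked condition):

	/* before: spin on a plain load, only a control dependency */
	#define arch_spin_unlock_wait(x) \
		do { cpu_relax(); } while ((x)->lock)

	/* after: the final load is an acquire, pairing with the
	 * store-release in arch_spin_unlock() */
	static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
	{
		smp_cond_load_acquire(&lock->lock, !VAL);
	}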

This fixes a number of spin_unlock_wait() users that (not
unreasonably) rely on this.

I also fixed a number of ticket-lock versions to wait only on the
current lock holder, instead of on a full unlock, as this is
sufficient.
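
For a ticket lock that means: snapshot the owner ticket once, then stop
spinning as soon as the lock is either unlocked (owner == next) or held
by a different owner, i.e. the holder we were waiting on has released
it. The ARM hunk below is representative:

	static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
	{
		u16 owner = READ_ONCE(lock->tickets.owner);

		for (;;) {
			arch_spinlock_t tmp = READ_ONCE(*lock);

			if (tmp.tickets.owner == tmp.tickets.next || /* unlocked */
			    tmp.tickets.owner != owner)              /* new owner */
				break;

			wfe();
		}
		smp_acquire__after_ctrl_dep();
	}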

Furthermore, again for the ticket locks, I added an smp_rmb() between
the initial ticket load and the spin loop testing the current value,
because I could not convince myself the address dependency is
sufficient, especially if the loads are of different sizes.

I'm more than happy to remove this smp_rmb() again if people are
certain the address dependency does indeed work as expected.
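
Concretely, the smp_rmb() sits between the initial (narrower) owner
load and the full-lock re-loads in the wait loop; the MIPS hunk below
is representative:

	u16 owner = READ_ONCE(lock->h.serving_now);	/* 16-bit load */
	smp_rmb();	/* don't rely on the address dependency alone */
	for (;;) {
		arch_spinlock_t tmp = READ_ONCE(*lock);	/* full-word load */

		if (tmp.h.serving_now == tmp.h.ticket ||
		    tmp.h.serving_now != owner)
			break;

		cpu_relax();
	}
	smp_acquire__after_ctrl_dep();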

Note: PPC32 will be fixed independently
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: chris@zankel.net
Cc: cmetcalf@mellanox.com
Cc: davem@davemloft.net
Cc: dhowells@redhat.com
Cc: james.hogan@imgtec.com
Cc: jejb@parisc-linux.org
Cc: linux@armlinux.org.uk
Cc: mpe@ellerman.id.au
Cc: ralf@linux-mips.org
Cc: realmz6@gmail.com
Cc: rkuo@codeaurora.org
Cc: rth@twiddle.net
Cc: schwidefsky@de.ibm.com
Cc: tony.luck@intel.com
Cc: vgupta@synopsys.com
Cc: ysato@users.sourceforge.jp
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent b464d127
@@ -3,6 +3,8 @@
 
 #include <linux/kernel.h>
 #include <asm/current.h>
+#include <asm/barrier.h>
+#include <asm/processor.h>
 
 /*
  * Simple spin lock operations.  There are two variants, one clears IRQ's
@@ -13,8 +15,11 @@
 
 #define arch_spin_lock_flags(lock, flags)  arch_spin_lock(lock)
 #define arch_spin_is_locked(x)	((x)->lock != 0)
-#define arch_spin_unlock_wait(x) \
-		do { cpu_relax(); } while ((x)->lock)
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	smp_cond_load_acquire(&lock->lock, !VAL);
+}
 
 static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 {
......
@@ -15,8 +15,11 @@
 
 #define arch_spin_is_locked(x)	((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
 #define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
-#define arch_spin_unlock_wait(x) \
-	do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	smp_cond_load_acquire(&lock->slock, !VAL);
+}
 
 #ifdef CONFIG_ARC_HAS_LLSC
......
@@ -6,6 +6,8 @@
 #endif
 
 #include <linux/prefetch.h>
+#include <asm/barrier.h>
+#include <asm/processor.h>
 
 /*
  * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
@@ -50,8 +52,21 @@ static inline void dsb_sev(void)
  * memory.
  */
 
-#define arch_spin_unlock_wait(lock) \
-	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	u16 owner = READ_ONCE(lock->tickets.owner);
+
+	for (;;) {
+		arch_spinlock_t tmp = READ_ONCE(*lock);
+
+		if (tmp.tickets.owner == tmp.tickets.next ||
+		    tmp.tickets.owner != owner)
+			break;
+
+		wfe();
+	}
+	smp_acquire__after_ctrl_dep();
+}
 
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
......
@@ -12,6 +12,8 @@
 #else
 
 #include <linux/atomic.h>
+#include <asm/processor.h>
+#include <asm/barrier.h>
 
 asmlinkage int __raw_spin_is_locked_asm(volatile int *ptr);
 asmlinkage void __raw_spin_lock_asm(volatile int *ptr);
@@ -48,8 +50,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 
 static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
-	while (arch_spin_is_locked(lock))
-		cpu_relax();
+	smp_cond_load_acquire(&lock->lock, !VAL);
 }
 
 static inline int arch_read_can_lock(arch_rwlock_t *rw)
......
@@ -23,6 +23,8 @@
 #define _ASM_SPINLOCK_H
 
 #include <asm/irqflags.h>
+#include <asm/barrier.h>
+#include <asm/processor.h>
 
 /*
  * This file is pulled in for SMP builds.
@@ -176,8 +178,12 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
  * SMP spinlocks are intended to allow only a single CPU at the lock
  */
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-#define arch_spin_unlock_wait(lock) \
-	do {while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	smp_cond_load_acquire(&lock->lock, !VAL);
+}
+
 #define arch_spin_is_locked(x) ((x)->lock != 0)
 
 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
......
@@ -15,6 +15,8 @@
 
 #include <linux/atomic.h>
 #include <asm/intrinsics.h>
+#include <asm/barrier.h>
+#include <asm/processor.h>
 
 #define arch_spin_lock_init(x)			((x)->lock = 0)
 
@@ -86,6 +88,8 @@ static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
 			return;
 		cpu_relax();
 	}
+
+	smp_acquire__after_ctrl_dep();
 }
 
 static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
......
@@ -13,6 +13,8 @@
 #include <linux/atomic.h>
 #include <asm/dcache_clear.h>
 #include <asm/page.h>
+#include <asm/barrier.h>
+#include <asm/processor.h>
 
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
@@ -27,8 +29,11 @@
 
 #define arch_spin_is_locked(x)		(*(volatile int *)(&(x)->slock) <= 0)
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-#define arch_spin_unlock_wait(x) \
-		do { cpu_relax(); } while (arch_spin_is_locked(x))
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	smp_cond_load_acquire(&lock->slock, VAL > 0);
+}
 
 /**
  * arch_spin_trylock - Try spin lock and return a result
......
 #ifndef __ASM_SPINLOCK_H
 #define __ASM_SPINLOCK_H
 
+#include <asm/barrier.h>
+#include <asm/processor.h>
+
 #ifdef CONFIG_METAG_ATOMICITY_LOCK1
 #include <asm/spinlock_lock1.h>
 #else
 #include <asm/spinlock_lnkget.h>
 #endif
 
-#define arch_spin_unlock_wait(lock) \
-	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
+/*
+ * both lock1 and lnkget are test-and-set spinlocks with 0 unlocked and 1
+ * locked.
+ */
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	smp_cond_load_acquire(&lock->lock, !VAL);
+}
 
 #define	arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
......
@@ -12,6 +12,7 @@
 #include <linux/compiler.h>
 
 #include <asm/barrier.h>
+#include <asm/processor.h>
 #include <asm/compiler.h>
 #include <asm/war.h>
 
@@ -48,8 +49,22 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 }
 
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-#define arch_spin_unlock_wait(x) \
-	while (arch_spin_is_locked(x)) { cpu_relax(); }
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	u16 owner = READ_ONCE(lock->h.serving_now);
+	smp_rmb();
+	for (;;) {
+		arch_spinlock_t tmp = READ_ONCE(*lock);
+
+		if (tmp.h.serving_now == tmp.h.ticket ||
+		    tmp.h.serving_now != owner)
+			break;
+
+		cpu_relax();
+	}
+	smp_acquire__after_ctrl_dep();
+}
 
 static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
......
@@ -12,6 +12,8 @@
 #define _ASM_SPINLOCK_H
 
 #include <linux/atomic.h>
+#include <asm/barrier.h>
+#include <asm/processor.h>
 #include <asm/rwlock.h>
 #include <asm/page.h>
 
@@ -23,7 +25,11 @@
  */
 
 #define arch_spin_is_locked(x)	(*(volatile signed char *)(&(x)->slock) != 0)
-#define arch_spin_unlock_wait(x) do { barrier(); } while (arch_spin_is_locked(x))
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	smp_cond_load_acquire(&lock->slock, !VAL);
+}
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
......
@@ -13,8 +13,13 @@ static inline int arch_spin_is_locked(arch_spinlock_t *x)
 }
 
 #define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
-#define arch_spin_unlock_wait(x) \
-		do { cpu_relax(); } while (arch_spin_is_locked(x))
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *x)
+{
+	volatile unsigned int *a = __ldcw_align(x);
+
+	smp_cond_load_acquire(a, VAL);
+}
 
 static inline void arch_spin_lock_flags(arch_spinlock_t *x,
 					 unsigned long flags)
......
@@ -10,6 +10,8 @@
 #define __ASM_SPINLOCK_H
 
 #include <linux/smp.h>
+#include <asm/barrier.h>
+#include <asm/processor.h>
 
 #define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)
 
@@ -97,6 +99,7 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
 	while (arch_spin_is_locked(lock))
 		arch_spin_relax(lock);
+	smp_acquire__after_ctrl_dep();
 }
 
 /*
......
@@ -19,14 +19,20 @@
 #error "Need movli.l/movco.l for spinlocks"
 #endif
 
+#include <asm/barrier.h>
+#include <asm/processor.h>
+
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */
 
 #define arch_spin_is_locked(x)		((x)->lock <= 0)
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-#define arch_spin_unlock_wait(x) \
-	do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	smp_cond_load_acquire(&lock->lock, VAL > 0);
+}
 
 /*
  * Simple spin lock operations.  There are two variants, one clears IRQ's
......
@@ -9,12 +9,15 @@
 #ifndef __ASSEMBLY__
 
 #include <asm/psr.h>
+#include <asm/barrier.h>
 #include <asm/processor.h> /* for cpu_relax */
 
 #define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
 
-#define arch_spin_unlock_wait(lock) \
-	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	smp_cond_load_acquire(&lock->lock, !VAL);
+}
 
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
......
@@ -8,6 +8,9 @@
 
 #ifndef __ASSEMBLY__
 
+#include <asm/processor.h>
+#include <asm/barrier.h>
+
 /* To get debugging spinlocks which detect and catch
  * deadlock situations, set CONFIG_DEBUG_SPINLOCK
  * and rebuild your kernel.
@@ -23,9 +26,10 @@
 
 #define arch_spin_is_locked(lp)	((lp)->lock != 0)
 
-#define arch_spin_unlock_wait(lp)	\
-	do {	rmb();			\
-	} while((lp)->lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	smp_cond_load_acquire(&lock->lock, !VAL);
+}
 
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
......
@@ -76,6 +76,12 @@ void arch_spin_unlock_wait(arch_spinlock_t *lock)
 	do {
 		delay_backoff(iterations++);
 	} while (READ_ONCE(lock->current_ticket) == curr);
+
+	/*
+	 * The TILE architecture doesn't do read speculation; therefore
+	 * a control dependency guarantees a LOAD->{LOAD,STORE} order.
+	 */
+	barrier();
 }
 EXPORT_SYMBOL(arch_spin_unlock_wait);
......
@@ -76,6 +76,12 @@ void arch_spin_unlock_wait(arch_spinlock_t *lock)
 	do {
 		delay_backoff(iterations++);
 	} while (arch_spin_current(READ_ONCE(lock->lock)) == curr);
+
+	/*
+	 * The TILE architecture doesn't do read speculation; therefore
+	 * a control dependency guarantees a LOAD->{LOAD,STORE} order.
+	 */
+	barrier();
 }
 EXPORT_SYMBOL(arch_spin_unlock_wait);
......
@@ -11,6 +11,9 @@
 #ifndef _XTENSA_SPINLOCK_H
 #define _XTENSA_SPINLOCK_H
 
+#include <asm/barrier.h>
+#include <asm/processor.h>
+
 /*
  * spinlock
  *
@@ -29,8 +32,11 @@
  */
 
 #define arch_spin_is_locked(x) ((x)->slock != 0)
-#define arch_spin_unlock_wait(lock) \
-	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	smp_cond_load_acquire(&lock->slock, !VAL);
+}
 
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
......
@@ -194,7 +194,7 @@ do { \
 })
 #endif
 
-#endif
+#endif	/* CONFIG_SMP */
 
 /* Barriers for virtual machine guests when talking to an SMP host */
 #define virt_mb() __smp_mb()
......
@@ -6,6 +6,7 @@
 #endif
 
 #include <asm/processor.h>	/* for cpu_relax() */
+#include <asm/barrier.h>
 
 /*
  * include/linux/spinlock_up.h - UP-debug version of spinlocks.
@@ -25,6 +26,11 @@
 #ifdef CONFIG_DEBUG_SPINLOCK
 #define arch_spin_is_locked(x)		((x)->slock == 0)
 
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	smp_cond_load_acquire(&lock->slock, VAL);
+}
+
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	lock->slock = 0;
@@ -67,6 +73,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 
 #else /* DEBUG_SPINLOCK */
 #define arch_spin_is_locked(lock)	((void)(lock), 0)
+#define arch_spin_unlock_wait(lock)	do { barrier(); (void)(lock); } while (0)
 
 /* for sched/core.c and kernel_lock.c: */
 # define arch_spin_lock(lock)		do { barrier(); (void)(lock); } while (0)
 # define arch_spin_lock_flags(lock, flags)	do { barrier(); (void)(lock); } while (0)
@@ -79,7 +86,4 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 #define arch_read_can_lock(lock)	(((void)(lock), 1))
 #define arch_write_can_lock(lock)	(((void)(lock), 1))
 
-#define arch_spin_unlock_wait(lock) \
-		do { cpu_relax(); } while (arch_spin_is_locked(lock))
-
 #endif /* __LINUX_SPINLOCK_UP_H */