提交 144b9c13 编写于 作者: A Anton Blanchard 提交者: Paul Mackerras

[PATCH] powerpc: use lwsync in atomics, bitops, lock functions

eieio only provides store-store ordering. When used to order an unlock
operation, loads may leak out of the critical region. This is potentially
buggy; one example is if a user wants to atomically read a couple of
values.

We can solve this with an lwsync which orders everything except store - load.

I removed the (now unused) EIEIO_ON_SMP macros and the C versions
isync_on_smp and eieio_on_smp now that we don't use them. I also removed
some old comments that were used to identify inline spinlocks in assembly;
they don't make sense now that our locks are out of line.

Another interesting thing was that read_unlock was using an eieio even
though the rest of the spinlock code had already been converted to
use lwsync.
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
上级 3356bb9f
...@@ -36,7 +36,7 @@ static __inline__ int atomic_add_return(int a, atomic_t *v) ...@@ -36,7 +36,7 @@ static __inline__ int atomic_add_return(int a, atomic_t *v)
int t; int t;
__asm__ __volatile__( __asm__ __volatile__(
EIEIO_ON_SMP LWSYNC_ON_SMP
"1: lwarx %0,0,%2 # atomic_add_return\n\ "1: lwarx %0,0,%2 # atomic_add_return\n\
add %0,%1,%0\n" add %0,%1,%0\n"
PPC405_ERR77(0,%2) PPC405_ERR77(0,%2)
...@@ -72,7 +72,7 @@ static __inline__ int atomic_sub_return(int a, atomic_t *v) ...@@ -72,7 +72,7 @@ static __inline__ int atomic_sub_return(int a, atomic_t *v)
int t; int t;
__asm__ __volatile__( __asm__ __volatile__(
EIEIO_ON_SMP LWSYNC_ON_SMP
"1: lwarx %0,0,%2 # atomic_sub_return\n\ "1: lwarx %0,0,%2 # atomic_sub_return\n\
subf %0,%1,%0\n" subf %0,%1,%0\n"
PPC405_ERR77(0,%2) PPC405_ERR77(0,%2)
...@@ -106,7 +106,7 @@ static __inline__ int atomic_inc_return(atomic_t *v) ...@@ -106,7 +106,7 @@ static __inline__ int atomic_inc_return(atomic_t *v)
int t; int t;
__asm__ __volatile__( __asm__ __volatile__(
EIEIO_ON_SMP LWSYNC_ON_SMP
"1: lwarx %0,0,%1 # atomic_inc_return\n\ "1: lwarx %0,0,%1 # atomic_inc_return\n\
addic %0,%0,1\n" addic %0,%0,1\n"
PPC405_ERR77(0,%1) PPC405_ERR77(0,%1)
...@@ -150,7 +150,7 @@ static __inline__ int atomic_dec_return(atomic_t *v) ...@@ -150,7 +150,7 @@ static __inline__ int atomic_dec_return(atomic_t *v)
int t; int t;
__asm__ __volatile__( __asm__ __volatile__(
EIEIO_ON_SMP LWSYNC_ON_SMP
"1: lwarx %0,0,%1 # atomic_dec_return\n\ "1: lwarx %0,0,%1 # atomic_dec_return\n\
addic %0,%0,-1\n" addic %0,%0,-1\n"
PPC405_ERR77(0,%1) PPC405_ERR77(0,%1)
...@@ -204,7 +204,7 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v) ...@@ -204,7 +204,7 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
int t; int t;
__asm__ __volatile__( __asm__ __volatile__(
EIEIO_ON_SMP LWSYNC_ON_SMP
"1: lwarx %0,0,%1 # atomic_dec_if_positive\n\ "1: lwarx %0,0,%1 # atomic_dec_if_positive\n\
addic. %0,%0,-1\n\ addic. %0,%0,-1\n\
blt- 2f\n" blt- 2f\n"
...@@ -253,7 +253,7 @@ static __inline__ long atomic64_add_return(long a, atomic64_t *v) ...@@ -253,7 +253,7 @@ static __inline__ long atomic64_add_return(long a, atomic64_t *v)
long t; long t;
__asm__ __volatile__( __asm__ __volatile__(
EIEIO_ON_SMP LWSYNC_ON_SMP
"1: ldarx %0,0,%2 # atomic64_add_return\n\ "1: ldarx %0,0,%2 # atomic64_add_return\n\
add %0,%1,%0\n\ add %0,%1,%0\n\
stdcx. %0,0,%2 \n\ stdcx. %0,0,%2 \n\
...@@ -287,7 +287,7 @@ static __inline__ long atomic64_sub_return(long a, atomic64_t *v) ...@@ -287,7 +287,7 @@ static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
long t; long t;
__asm__ __volatile__( __asm__ __volatile__(
EIEIO_ON_SMP LWSYNC_ON_SMP
"1: ldarx %0,0,%2 # atomic64_sub_return\n\ "1: ldarx %0,0,%2 # atomic64_sub_return\n\
subf %0,%1,%0\n\ subf %0,%1,%0\n\
stdcx. %0,0,%2 \n\ stdcx. %0,0,%2 \n\
...@@ -319,7 +319,7 @@ static __inline__ long atomic64_inc_return(atomic64_t *v) ...@@ -319,7 +319,7 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
long t; long t;
__asm__ __volatile__( __asm__ __volatile__(
EIEIO_ON_SMP LWSYNC_ON_SMP
"1: ldarx %0,0,%1 # atomic64_inc_return\n\ "1: ldarx %0,0,%1 # atomic64_inc_return\n\
addic %0,%0,1\n\ addic %0,%0,1\n\
stdcx. %0,0,%1 \n\ stdcx. %0,0,%1 \n\
...@@ -361,7 +361,7 @@ static __inline__ long atomic64_dec_return(atomic64_t *v) ...@@ -361,7 +361,7 @@ static __inline__ long atomic64_dec_return(atomic64_t *v)
long t; long t;
__asm__ __volatile__( __asm__ __volatile__(
EIEIO_ON_SMP LWSYNC_ON_SMP
"1: ldarx %0,0,%1 # atomic64_dec_return\n\ "1: ldarx %0,0,%1 # atomic64_dec_return\n\
addic %0,%0,-1\n\ addic %0,%0,-1\n\
stdcx. %0,0,%1\n\ stdcx. %0,0,%1\n\
...@@ -386,7 +386,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v) ...@@ -386,7 +386,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
long t; long t;
__asm__ __volatile__( __asm__ __volatile__(
EIEIO_ON_SMP LWSYNC_ON_SMP
"1: ldarx %0,0,%1 # atomic64_dec_if_positive\n\ "1: ldarx %0,0,%1 # atomic64_dec_if_positive\n\
addic. %0,%0,-1\n\ addic. %0,%0,-1\n\
blt- 2f\n\ blt- 2f\n\
......
...@@ -112,7 +112,7 @@ static __inline__ int test_and_set_bit(unsigned long nr, ...@@ -112,7 +112,7 @@ static __inline__ int test_and_set_bit(unsigned long nr,
unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
__asm__ __volatile__( __asm__ __volatile__(
EIEIO_ON_SMP LWSYNC_ON_SMP
"1:" PPC_LLARX "%0,0,%3 # test_and_set_bit\n" "1:" PPC_LLARX "%0,0,%3 # test_and_set_bit\n"
"or %1,%0,%2 \n" "or %1,%0,%2 \n"
PPC405_ERR77(0,%3) PPC405_ERR77(0,%3)
...@@ -134,7 +134,7 @@ static __inline__ int test_and_clear_bit(unsigned long nr, ...@@ -134,7 +134,7 @@ static __inline__ int test_and_clear_bit(unsigned long nr,
unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
__asm__ __volatile__( __asm__ __volatile__(
EIEIO_ON_SMP LWSYNC_ON_SMP
"1:" PPC_LLARX "%0,0,%3 # test_and_clear_bit\n" "1:" PPC_LLARX "%0,0,%3 # test_and_clear_bit\n"
"andc %1,%0,%2 \n" "andc %1,%0,%2 \n"
PPC405_ERR77(0,%3) PPC405_ERR77(0,%3)
...@@ -156,7 +156,7 @@ static __inline__ int test_and_change_bit(unsigned long nr, ...@@ -156,7 +156,7 @@ static __inline__ int test_and_change_bit(unsigned long nr,
unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
__asm__ __volatile__( __asm__ __volatile__(
EIEIO_ON_SMP LWSYNC_ON_SMP
"1:" PPC_LLARX "%0,0,%3 # test_and_change_bit\n" "1:" PPC_LLARX "%0,0,%3 # test_and_change_bit\n"
"xor %1,%0,%2 \n" "xor %1,%0,%2 \n"
PPC405_ERR77(0,%3) PPC405_ERR77(0,%3)
......
...@@ -11,7 +11,7 @@ ...@@ -11,7 +11,7 @@
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
__asm__ __volatile ( \ __asm__ __volatile ( \
SYNC_ON_SMP \ LWSYNC_ON_SMP \
"1: lwarx %0,0,%2\n" \ "1: lwarx %0,0,%2\n" \
insn \ insn \
PPC405_ERR77(0, %2) \ PPC405_ERR77(0, %2) \
......
...@@ -46,7 +46,7 @@ static __inline__ unsigned long __spin_trylock(raw_spinlock_t *lock) ...@@ -46,7 +46,7 @@ static __inline__ unsigned long __spin_trylock(raw_spinlock_t *lock)
token = LOCK_TOKEN; token = LOCK_TOKEN;
__asm__ __volatile__( __asm__ __volatile__(
"1: lwarx %0,0,%2 # __spin_trylock\n\ "1: lwarx %0,0,%2\n\
cmpwi 0,%0,0\n\ cmpwi 0,%0,0\n\
bne- 2f\n\ bne- 2f\n\
stwcx. %1,0,%2\n\ stwcx. %1,0,%2\n\
...@@ -124,8 +124,8 @@ static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long ...@@ -124,8 +124,8 @@ static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long
static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock) static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
{ {
__asm__ __volatile__(SYNC_ON_SMP" # __raw_spin_unlock" __asm__ __volatile__("# __raw_spin_unlock\n\t"
: : :"memory"); LWSYNC_ON_SMP: : :"memory");
lock->slock = 0; lock->slock = 0;
} }
...@@ -167,7 +167,7 @@ static long __inline__ __read_trylock(raw_rwlock_t *rw) ...@@ -167,7 +167,7 @@ static long __inline__ __read_trylock(raw_rwlock_t *rw)
long tmp; long tmp;
__asm__ __volatile__( __asm__ __volatile__(
"1: lwarx %0,0,%1 # read_trylock\n" "1: lwarx %0,0,%1\n"
__DO_SIGN_EXTEND __DO_SIGN_EXTEND
" addic. %0,%0,1\n\ " addic. %0,%0,1\n\
ble- 2f\n" ble- 2f\n"
...@@ -192,7 +192,7 @@ static __inline__ long __write_trylock(raw_rwlock_t *rw) ...@@ -192,7 +192,7 @@ static __inline__ long __write_trylock(raw_rwlock_t *rw)
token = WRLOCK_TOKEN; token = WRLOCK_TOKEN;
__asm__ __volatile__( __asm__ __volatile__(
"1: lwarx %0,0,%2 # write_trylock\n\ "1: lwarx %0,0,%2\n\
cmpwi 0,%0,0\n\ cmpwi 0,%0,0\n\
bne- 2f\n" bne- 2f\n"
PPC405_ERR77(0,%1) PPC405_ERR77(0,%1)
...@@ -249,8 +249,9 @@ static void __inline__ __raw_read_unlock(raw_rwlock_t *rw) ...@@ -249,8 +249,9 @@ static void __inline__ __raw_read_unlock(raw_rwlock_t *rw)
long tmp; long tmp;
__asm__ __volatile__( __asm__ __volatile__(
"eieio # read_unlock\n\ "# read_unlock\n\t"
1: lwarx %0,0,%1\n\ LWSYNC_ON_SMP
"1: lwarx %0,0,%1\n\
addic %0,%0,-1\n" addic %0,%0,-1\n"
PPC405_ERR77(0,%1) PPC405_ERR77(0,%1)
" stwcx. %0,0,%1\n\ " stwcx. %0,0,%1\n\
...@@ -262,8 +263,8 @@ static void __inline__ __raw_read_unlock(raw_rwlock_t *rw) ...@@ -262,8 +263,8 @@ static void __inline__ __raw_read_unlock(raw_rwlock_t *rw)
static __inline__ void __raw_write_unlock(raw_rwlock_t *rw) static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
{ {
__asm__ __volatile__(SYNC_ON_SMP" # write_unlock" __asm__ __volatile__("# write_unlock\n\t"
: : :"memory"); LWSYNC_ON_SMP: : :"memory");
rw->lock = 0; rw->lock = 0;
} }
......
...@@ -2,6 +2,8 @@ ...@@ -2,6 +2,8 @@
#define _ASM_POWERPC_SYNCH_H #define _ASM_POWERPC_SYNCH_H
#ifdef __KERNEL__ #ifdef __KERNEL__
#include <linux/stringify.h>
#ifdef __powerpc64__ #ifdef __powerpc64__
#define __SUBARCH_HAS_LWSYNC #define __SUBARCH_HAS_LWSYNC
#endif #endif
...@@ -12,20 +14,12 @@ ...@@ -12,20 +14,12 @@
# define LWSYNC sync # define LWSYNC sync
#endif #endif
/*
* Arguably the bitops and *xchg operations don't imply any memory barrier
* or SMP ordering, but in fact a lot of drivers expect them to imply
* both, since they do on x86 cpus.
*/
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
#define EIEIO_ON_SMP "eieio\n"
#define ISYNC_ON_SMP "\n\tisync" #define ISYNC_ON_SMP "\n\tisync"
#define SYNC_ON_SMP __stringify(LWSYNC) "\n" #define LWSYNC_ON_SMP __stringify(LWSYNC) "\n"
#else #else
#define EIEIO_ON_SMP
#define ISYNC_ON_SMP #define ISYNC_ON_SMP
#define SYNC_ON_SMP #define LWSYNC_ON_SMP
#endif #endif
static inline void eieio(void) static inline void eieio(void)
...@@ -38,14 +32,5 @@ static inline void isync(void) ...@@ -38,14 +32,5 @@ static inline void isync(void)
__asm__ __volatile__ ("isync" : : : "memory"); __asm__ __volatile__ ("isync" : : : "memory");
} }
#ifdef CONFIG_SMP
#define eieio_on_smp() eieio()
#define isync_on_smp() isync()
#else
#define eieio_on_smp() __asm__ __volatile__("": : :"memory")
#define isync_on_smp() __asm__ __volatile__("": : :"memory")
#endif
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SYNCH_H */ #endif /* _ASM_POWERPC_SYNCH_H */
...@@ -212,7 +212,7 @@ __xchg_u32(volatile void *p, unsigned long val) ...@@ -212,7 +212,7 @@ __xchg_u32(volatile void *p, unsigned long val)
unsigned long prev; unsigned long prev;
__asm__ __volatile__( __asm__ __volatile__(
EIEIO_ON_SMP LWSYNC_ON_SMP
"1: lwarx %0,0,%2 \n" "1: lwarx %0,0,%2 \n"
PPC405_ERR77(0,%2) PPC405_ERR77(0,%2)
" stwcx. %3,0,%2 \n\ " stwcx. %3,0,%2 \n\
...@@ -232,7 +232,7 @@ __xchg_u64(volatile void *p, unsigned long val) ...@@ -232,7 +232,7 @@ __xchg_u64(volatile void *p, unsigned long val)
unsigned long prev; unsigned long prev;
__asm__ __volatile__( __asm__ __volatile__(
EIEIO_ON_SMP LWSYNC_ON_SMP
"1: ldarx %0,0,%2 \n" "1: ldarx %0,0,%2 \n"
PPC405_ERR77(0,%2) PPC405_ERR77(0,%2)
" stdcx. %3,0,%2 \n\ " stdcx. %3,0,%2 \n\
...@@ -287,7 +287,7 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new) ...@@ -287,7 +287,7 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
unsigned int prev; unsigned int prev;
__asm__ __volatile__ ( __asm__ __volatile__ (
EIEIO_ON_SMP LWSYNC_ON_SMP
"1: lwarx %0,0,%2 # __cmpxchg_u32\n\ "1: lwarx %0,0,%2 # __cmpxchg_u32\n\
cmpw 0,%0,%3\n\ cmpw 0,%0,%3\n\
bne- 2f\n" bne- 2f\n"
...@@ -311,7 +311,7 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new) ...@@ -311,7 +311,7 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
unsigned long prev; unsigned long prev;
__asm__ __volatile__ ( __asm__ __volatile__ (
EIEIO_ON_SMP LWSYNC_ON_SMP
"1: ldarx %0,0,%2 # __cmpxchg_u64\n\ "1: ldarx %0,0,%2 # __cmpxchg_u64\n\
cmpd 0,%0,%3\n\ cmpd 0,%0,%3\n\
bne- 2f\n\ bne- 2f\n\
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册