Commit 02c503ff authored by Martin Schwidefsky

s390/spinlock: use atomic primitives for spinlocks

Add a couple more __atomic_xxx functions to atomic_ops.h and use them
to replace the compare-and-swap inlines in the spinlock code. This
changes the type of the lock value from unsigned int to int.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Parent df26c2e8
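
The patch swaps hand-written compare-and-swap assembler for the GCC
__sync builtins. As a quick illustration of the two flavors it relies
on, here is a minimal user-space sketch (not part of the patch; the
file name cas-demo.c is made up):

/*
 * Sketch of the two CAS flavors used below.
 * Build with: gcc -O2 cas-demo.c
 */
#include <stdio.h>

static int lock;

int main(void)
{
	int old;

	/* Value-returning CAS: yields the previous contents of the word. */
	old = __sync_val_compare_and_swap(&lock, 0, 42);
	printf("val  CAS: old=%d lock=%d\n", old, lock);	/* old=0, lock=42 */

	/* Boolean CAS: reports success; fails here because lock is now 42. */
	if (!__sync_bool_compare_and_swap(&lock, 0, 7))
		printf("bool CAS: failed, lock stays %d\n", lock);

	return 0;
}
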
@@ -111,20 +111,22 @@ __ATOMIC64_OPS(__atomic64_xor, "xgr")
 
 static inline int __atomic_cmpxchg(int *ptr, int old, int new)
 {
-	asm volatile(
-		"	cs	%[old],%[new],%[ptr]"
-		: [old] "+d" (old), [ptr] "+Q" (*ptr)
-		: [new] "d" (new) : "cc", "memory");
-	return old;
+	return __sync_val_compare_and_swap(ptr, old, new);
+}
+
+static inline int __atomic_cmpxchg_bool(int *ptr, int old, int new)
+{
+	return __sync_bool_compare_and_swap(ptr, old, new);
 }
 
 static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
 {
-	asm volatile(
-		"	csg	%[old],%[new],%[ptr]"
-		: [old] "+d" (old), [ptr] "+Q" (*ptr)
-		: [new] "d" (new) : "cc", "memory");
-	return old;
+	return __sync_val_compare_and_swap(ptr, old, new);
+}
+
+static inline long __atomic64_cmpxchg_bool(long *ptr, long old, long new)
+{
+	return __sync_bool_compare_and_swap(ptr, old, new);
 }
 
 #endif /* __ARCH_S390_ATOMIC_OPS__ */
@@ -10,6 +10,7 @@
 #define __ASM_SPINLOCK_H
 
 #include <linux/smp.h>
+#include <asm/atomic_ops.h>
 #include <asm/barrier.h>
 #include <asm/processor.h>
@@ -17,12 +18,6 @@
 extern int spin_retry;
 
-static inline int
-_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
-{
-	return __sync_bool_compare_and_swap(lock, old, new);
-}
-
 #ifndef CONFIG_SMP
 static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
 #else
@@ -40,7 +35,7 @@ bool arch_vcpu_is_preempted(int cpu);
  * (the type definitions are in asm/spinlock_types.h)
  */
 
-void arch_lock_relax(unsigned int cpu);
+void arch_lock_relax(int cpu);
 
 void arch_spin_lock_wait(arch_spinlock_t *);
 int arch_spin_trylock_retry(arch_spinlock_t *);
@@ -70,7 +65,7 @@ static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
 {
 	barrier();
 	return likely(arch_spin_value_unlocked(*lp) &&
-		      _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL));
+		      __atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
 }
 
 static inline void arch_spin_lock(arch_spinlock_t *lp)
@@ -95,7 +90,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lp)
 
 static inline void arch_spin_unlock(arch_spinlock_t *lp)
 {
-	typecheck(unsigned int, lp->lock);
+	typecheck(int, lp->lock);
 	asm volatile(
 		"st	%1,%0\n"
 		: "+Q" (lp->lock)
@@ -141,16 +136,16 @@ extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
 
 static inline int arch_read_trylock_once(arch_rwlock_t *rw)
 {
-	unsigned int old = ACCESS_ONCE(rw->lock);
-	return likely((int) old >= 0 &&
-		      _raw_compare_and_swap(&rw->lock, old, old + 1));
+	int old = ACCESS_ONCE(rw->lock);
+	return likely(old >= 0 &&
+		      __atomic_cmpxchg_bool(&rw->lock, old, old + 1));
 }
 
 static inline int arch_write_trylock_once(arch_rwlock_t *rw)
 {
-	unsigned int old = ACCESS_ONCE(rw->lock);
-	return likely(old == 0 &&
-		      _raw_compare_and_swap(&rw->lock, 0, 0x80000000));
+	int old = ACCESS_ONCE(rw->lock);
+	return likely(old == 0 &&
+		      __atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000));
 }
 
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
@@ -161,9 +156,9 @@ static inline int arch_write_trylock_once(arch_rwlock_t *rw)
 
 #define __RAW_LOCK(ptr, op_val, op_string)		\
 ({							\
-	unsigned int old_val;				\
+	int old_val;					\
 							\
-	typecheck(unsigned int *, ptr);			\
+	typecheck(int *, ptr);				\
 	asm volatile(					\
 		op_string "	%0,%2,%1\n"		\
 		"bcr	14,0\n"				\
@@ -175,9 +170,9 @@ static inline int arch_write_trylock_once(arch_rwlock_t *rw)
 
 #define __RAW_UNLOCK(ptr, op_val, op_string)		\
 ({							\
-	unsigned int old_val;				\
+	int old_val;					\
 							\
-	typecheck(unsigned int *, ptr);			\
+	typecheck(int *, ptr);				\
 	asm volatile(					\
 		op_string "	%0,%2,%1\n"		\
 		: "=d" (old_val), "+Q" (*ptr)		\
@@ -187,14 +182,14 @@ static inline int arch_write_trylock_once(arch_rwlock_t *rw)
 })
 
 extern void _raw_read_lock_wait(arch_rwlock_t *lp);
-extern void _raw_write_lock_wait(arch_rwlock_t *lp, unsigned int prev);
+extern void _raw_write_lock_wait(arch_rwlock_t *lp, int prev);
 
 static inline void arch_read_lock(arch_rwlock_t *rw)
 {
-	unsigned int old;
+	int old;
 
 	old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
-	if ((int) old < 0)
+	if (old < 0)
 		_raw_read_lock_wait(rw);
 }
@@ -205,7 +200,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 
 static inline void arch_write_lock(arch_rwlock_t *rw)
 {
-	unsigned int old;
+	int old;
 
 	old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
 	if (old != 0)
@@ -232,11 +227,11 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
 
 static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
-	unsigned int old;
+	int old;
 
 	do {
 		old = ACCESS_ONCE(rw->lock);
-	} while (!_raw_compare_and_swap(&rw->lock, old, old - 1));
+	} while (!__atomic_cmpxchg_bool(&rw->lock, old, old - 1));
 }
 
 static inline void arch_write_lock(arch_rwlock_t *rw)
@@ -248,7 +243,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 
 static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
-	typecheck(unsigned int, rw->lock);
+	typecheck(int, rw->lock);
 	rw->owner = 0;
 	asm volatile(
...
@@ -6,14 +6,14 @@
 #endif
 
 typedef struct {
-	unsigned int lock;
+	int lock;
 } __attribute__ ((aligned (4))) arch_spinlock_t;
 
 #define __ARCH_SPIN_LOCK_UNLOCKED { .lock = 0, }
 
 typedef struct {
-	unsigned int lock;
-	unsigned int owner;
+	int lock;
+	int owner;
 } arch_rwlock_t;
 
 #define __ARCH_RW_LOCK_UNLOCKED		{ 0 }
...
@@ -32,23 +32,22 @@ static int __init spin_retry_setup(char *str)
 }
 __setup("spin_retry=", spin_retry_setup);
 
-static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
+static inline void compare_and_delay(int *lock, int old)
 {
 	asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
 }
 
 void arch_spin_lock_wait(arch_spinlock_t *lp)
 {
-	unsigned int cpu = SPINLOCK_LOCKVAL;
-	unsigned int owner;
-	int count, first_diag;
+	int cpu = SPINLOCK_LOCKVAL;
+	int owner, count, first_diag;
 
 	first_diag = 1;
 	while (1) {
 		owner = ACCESS_ONCE(lp->lock);
 		/* Try to get the lock if it is free. */
 		if (!owner) {
-			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+			if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
 				return;
 			continue;
 		}
@@ -62,7 +61,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 		count = spin_retry;
 		do {
 			if (MACHINE_HAS_CAD)
-				_raw_compare_and_delay(&lp->lock, owner);
+				compare_and_delay(&lp->lock, owner);
 			owner = ACCESS_ONCE(lp->lock);
 		} while (owner && count-- > 0);
 		if (!owner)
@@ -82,9 +81,8 @@ EXPORT_SYMBOL(arch_spin_lock_wait);
 
 void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 {
-	unsigned int cpu = SPINLOCK_LOCKVAL;
-	unsigned int owner;
-	int count, first_diag;
+	int cpu = SPINLOCK_LOCKVAL;
+	int owner, count, first_diag;
 
 	local_irq_restore(flags);
 	first_diag = 1;
@@ -93,7 +91,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 		/* Try to get the lock if it is free. */
 		if (!owner) {
 			local_irq_disable();
-			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+			if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
 				return;
 			local_irq_restore(flags);
 			continue;
@@ -108,7 +106,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 		count = spin_retry;
 		do {
 			if (MACHINE_HAS_CAD)
-				_raw_compare_and_delay(&lp->lock, owner);
+				compare_and_delay(&lp->lock, owner);
 			owner = ACCESS_ONCE(lp->lock);
 		} while (owner && count-- > 0);
 		if (!owner)
@@ -128,18 +126,17 @@ EXPORT_SYMBOL(arch_spin_lock_wait_flags);
 
 int arch_spin_trylock_retry(arch_spinlock_t *lp)
 {
-	unsigned int cpu = SPINLOCK_LOCKVAL;
-	unsigned int owner;
-	int count;
+	int cpu = SPINLOCK_LOCKVAL;
+	int owner, count;
 
 	for (count = spin_retry; count > 0; count--) {
 		owner = READ_ONCE(lp->lock);
 		/* Try to get the lock if it is free. */
 		if (!owner) {
-			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+			if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
 				return 1;
 		} else if (MACHINE_HAS_CAD)
-			_raw_compare_and_delay(&lp->lock, owner);
+			compare_and_delay(&lp->lock, owner);
 	}
 	return 0;
 }
@@ -147,8 +144,8 @@ EXPORT_SYMBOL(arch_spin_trylock_retry);
 
 void _raw_read_lock_wait(arch_rwlock_t *rw)
 {
-	unsigned int owner, old;
 	int count = spin_retry;
+	int owner, old;
 
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
 	__RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
@@ -162,12 +159,12 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
 		}
 		old = ACCESS_ONCE(rw->lock);
 		owner = ACCESS_ONCE(rw->owner);
-		if ((int) old < 0) {
+		if (old < 0) {
 			if (MACHINE_HAS_CAD)
-				_raw_compare_and_delay(&rw->lock, old);
+				compare_and_delay(&rw->lock, old);
 			continue;
 		}
-		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
+		if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
 			return;
 	}
 }
@@ -175,17 +172,17 @@ EXPORT_SYMBOL(_raw_read_lock_wait);
 
 int _raw_read_trylock_retry(arch_rwlock_t *rw)
 {
-	unsigned int old;
 	int count = spin_retry;
+	int old;
 
 	while (count-- > 0) {
 		old = ACCESS_ONCE(rw->lock);
-		if ((int) old < 0) {
+		if (old < 0) {
 			if (MACHINE_HAS_CAD)
-				_raw_compare_and_delay(&rw->lock, old);
+				compare_and_delay(&rw->lock, old);
 			continue;
 		}
-		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
+		if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
 			return 1;
 	}
 	return 0;
@@ -194,10 +191,10 @@ EXPORT_SYMBOL(_raw_read_trylock_retry);
 
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
 
-void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
+void _raw_write_lock_wait(arch_rwlock_t *rw, int prev)
 {
-	unsigned int owner, old;
 	int count = spin_retry;
+	int owner, old;
 
 	owner = 0;
 	while (1) {
@@ -209,14 +206,14 @@ void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
 		old = ACCESS_ONCE(rw->lock);
 		owner = ACCESS_ONCE(rw->owner);
 		smp_mb();
-		if ((int) old >= 0) {
+		if (old >= 0) {
 			prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
 			old = prev;
 		}
-		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
+		if ((old & 0x7fffffff) == 0 && prev >= 0)
 			break;
 		if (MACHINE_HAS_CAD)
-			_raw_compare_and_delay(&rw->lock, old);
+			compare_and_delay(&rw->lock, old);
 	}
 }
 EXPORT_SYMBOL(_raw_write_lock_wait);
@@ -225,8 +222,8 @@ EXPORT_SYMBOL(_raw_write_lock_wait);
 
 void _raw_write_lock_wait(arch_rwlock_t *rw)
 {
-	unsigned int owner, old, prev;
 	int count = spin_retry;
+	int owner, old, prev;
 
 	prev = 0x80000000;
 	owner = 0;
@@ -238,15 +235,15 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
 		}
 		old = ACCESS_ONCE(rw->lock);
 		owner = ACCESS_ONCE(rw->owner);
-		if ((int) old >= 0 &&
-		    _raw_compare_and_swap(&rw->lock, old, old | 0x80000000))
+		if (old >= 0 &&
+		    __atomic_cmpxchg_bool(&rw->lock, old, old | 0x80000000))
 			prev = old;
 		else
 			smp_mb();
-		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
+		if ((old & 0x7fffffff) == 0 && prev >= 0)
 			break;
 		if (MACHINE_HAS_CAD)
-			_raw_compare_and_delay(&rw->lock, old);
+			compare_and_delay(&rw->lock, old);
 	}
 }
 EXPORT_SYMBOL(_raw_write_lock_wait);
@@ -255,24 +252,24 @@ EXPORT_SYMBOL(_raw_write_lock_wait);
 
 int _raw_write_trylock_retry(arch_rwlock_t *rw)
 {
-	unsigned int old;
 	int count = spin_retry;
+	int old;
 
 	while (count-- > 0) {
 		old = ACCESS_ONCE(rw->lock);
 		if (old) {
 			if (MACHINE_HAS_CAD)
-				_raw_compare_and_delay(&rw->lock, old);
+				compare_and_delay(&rw->lock, old);
 			continue;
 		}
-		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
+		if (__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000))
 			return 1;
 	}
 	return 0;
 }
 EXPORT_SYMBOL(_raw_write_trylock_retry);
 
-void arch_lock_relax(unsigned int cpu)
+void arch_lock_relax(int cpu)
 {
 	if (!cpu)
 		return;
...
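
For illustration only (not from the patch): with the lock word an int
and a boolean CAS helper available, the spinlock fast path reduces to a
single builtin call. A hedged user-space sketch follows; the toy_*
names and the lockval parameter are hypothetical stand-ins for
arch_spin_trylock_once and SPINLOCK_LOCKVAL, and the kernel versions
add owner diagnostics and retry loops on top of this:

/* Toy model of the fast path; a plain int stands in for arch_spinlock_t. */
static inline int toy_spin_trylock(int *lock, int lockval)
{
	/* Take the lock only if it is free (== 0); non-zero means held. */
	return __sync_bool_compare_and_swap(lock, 0, lockval);
}

static inline void toy_spin_unlock(int *lock)
{
	__sync_synchronize();	/* conservative full barrier before release */
	*lock = 0;		/* a plain store of 0 frees the lock */
}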