Commit 0cd64efb authored by Peter Zijlstra, committed by Ingo Molnar

arch,ia64: Convert smp_mb__*()

ia64 atomic ops are full barriers; implement the new
smp_mb__{before,after}_atomic().
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/n/tip-hyp7yj68cmqz1nqbfpr541ca@git.kernel.org
Cc: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: linux-ia64@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent 94cf42f8
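For context, the conversion replaces the per-operation barriers (smp_mb__before_atomic_dec() and friends) with one generic pair. A minimal caller-side sketch of the intended pattern follows; the struct, fields and function are hypothetical and only illustrate how the new macros pair with a non-value-returning atomic op (on ia64 both macros reduce to barrier(), since the atomic op itself is already a full barrier):

#include <linux/atomic.h>

/* Hypothetical example, not part of this commit. */
struct obj {
	int		dead;
	atomic_t	ref_count;
};

static void obj_kill(struct obj *obj)
{
	obj->dead = 1;			/* plain store...                       */
	smp_mb__before_atomic();	/* ...ordered before the following RMW  */
	atomic_dec(&obj->ref_count);	/* no implied ordering in the generic
					 * model; a full barrier on ia64       */
}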
arch/ia64/include/asm/atomic.h
@@ -15,6 +15,7 @@
 #include <linux/types.h>
 #include <asm/intrinsics.h>
+#include <asm/barrier.h>

 #define ATOMIC_INIT(i)		{ (i) }
@@ -208,10 +209,4 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
 #define atomic64_inc(v)		atomic64_add(1, (v))
 #define atomic64_dec(v)		atomic64_sub(1, (v))

-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec()	barrier()
-#define smp_mb__after_atomic_dec()	barrier()
-#define smp_mb__before_atomic_inc()	barrier()
-#define smp_mb__after_atomic_inc()	barrier()
-
 #endif /* _ASM_IA64_ATOMIC_H */
arch/ia64/include/asm/barrier.h
@@ -55,6 +55,9 @@
 #endif

+#define smp_mb__before_atomic()	barrier()
+#define smp_mb__after_atomic()	barrier()
+
 /*
  * IA64 GCC turns volatile stores into st.rel and volatile loads into ld.acq no
  * need for asm trickery!
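Note that barrier() is purely a compiler barrier, so the new macros cost nothing on ia64. Its usual definition in the generic compiler headers is roughly the following (quoted from memory, so treat it as approximate): an empty asm with a "memory" clobber forbids the compiler from reordering or caching memory accesses across it, while emitting no instruction.

#define barrier() __asm__ __volatile__("" : : : "memory")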
arch/ia64/include/asm/bitops.h
@@ -16,6 +16,7 @@
 #include <linux/compiler.h>
 #include <linux/types.h>
 #include <asm/intrinsics.h>
+#include <asm/barrier.h>

 /**
  * set_bit - Atomically set a bit in memory
@@ -65,9 +66,6 @@ __set_bit (int nr, volatile void *addr)
 	*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
 }

-#define smp_mb__before_clear_bit()	barrier();
-#define smp_mb__after_clear_bit()	barrier();
-
 /**
  * clear_bit - Clears a bit in memory
  * @nr: Bit to clear
@@ -75,7 +73,7 @@ __set_bit (int nr, volatile void *addr)
  *
  * clear_bit() is atomic and may not be reordered.  However, it does
  * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
  * in order to ensure changes are visible on other processors.
  */
 static __inline__ void
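To illustrate the updated clear_bit() kerneldoc, here is a hedged sketch of the locking-style usage it describes; my_bit_unlock() and MY_LOCK_BIT are made-up names, not from this commit:

#include <linux/bitops.h>

#define MY_LOCK_BIT	0	/* hypothetical lock bit */

static void my_bit_unlock(unsigned long *word)
{
	/* clear_bit() itself implies no ordering, so make the critical
	 * section's stores visible before other CPUs can see the bit clear. */
	smp_mb__before_atomic();
	clear_bit(MY_LOCK_BIT, word);
}

On ia64 the smp_mb__before_atomic() here is only a compiler barrier, because clear_bit() is implemented with a fully ordered atomic operation.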