commit 52393ccc authored by Steven Rostedt, committed by Linus Torvalds

[PATCH] remove set_wmb - arch removal

set_wmb should not be used in the kernel because it just confuses the
code more and has no benefit.  Since it is not currently used in the
kernel, this patch removes it so that new code does not include it.

All archs define set_wmb(var, value) to do { var = value; wmb(); }
while(0), except for ia64 and sparc, which use mb() instead.  But this is
still moot since it is not used anyway.
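
For reference, a minimal sketch of the two definition variants described above, as they appear in the per-arch headers touched by this patch (the two #defines come from different headers and are shown side by side only for comparison; the last comment shows the open-coded equivalent, since the macro adds nothing over writing it out):

	/* Most architectures: plain store followed by a write barrier. */
	#define set_wmb(var, value)	do { var = value; wmb(); } while (0)

	/* ia64 and sparc: same store, but followed by a full memory barrier. */
	#define set_wmb(var, value)	do { (var) = (value); mb(); } while (0)

	/* With the macro gone, any future user can simply open-code the pair: */
	var = value;
	wmb();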

Hasn't been tested on any archs but x86 and x86_64 (and only
compile-tested).
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent f92213ba
@@ -30,7 +30,4 @@ __asm__ __volatile__("mb": : :"memory")
 #define set_mb(var, value) \
 do { var = value; mb(); } while (0)
-#define set_wmb(var, value) \
-do { var = value; wmb(); } while (0)
 #endif /* __BARRIER_H */
@@ -176,7 +176,6 @@ extern unsigned int user_debug;
 #define wmb() mb()
 #define read_barrier_depends() do { } while(0)
 #define set_mb(var, value) do { var = value; mb(); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
 #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
 /*
@@ -90,7 +90,6 @@ extern unsigned int user_debug;
 #define read_barrier_depends() do { } while(0)
 #define set_mb(var, value) do { var = value; mb(); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
 /*
  * We assume knowledge of how
@@ -17,7 +17,6 @@ extern struct task_struct *resume(struct task_struct *prev, struct task_struct *
 #define wmb() mb()
 #define read_barrier_depends() do { } while(0)
 #define set_mb(var, value) do { var = value; mb(); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
 #ifdef CONFIG_SMP
 #define smp_mb() mb()
@@ -179,7 +179,6 @@ do { \
 #define rmb() asm volatile ("membar" : : :"memory")
 #define wmb() asm volatile ("membar" : : :"memory")
 #define set_mb(var, value) do { var = value; mb(); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
 #define smp_mb() mb()
 #define smp_rmb() rmb()
@@ -84,7 +84,6 @@ asmlinkage void resume(void);
 #define wmb() asm volatile ("" : : :"memory")
 #define set_rmb(var, value) do { xchg(&var, value); } while (0)
 #define set_mb(var, value) set_rmb(var, value)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
 #ifdef CONFIG_SMP
 #define smp_mb() mb()
@@ -453,8 +453,6 @@ static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long l
 #define set_mb(var, value) do { var = value; barrier(); } while (0)
 #endif
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
 #include <linux/irqflags.h>
 /*
@@ -98,12 +98,11 @@ extern struct ia64_boot_param {
 #endif
 /*
- * XXX check on these---I suspect what Linus really wants here is
+ * XXX check on this ---I suspect what Linus really wants here is
  * acquire vs release semantics but we can't discuss this stuff with
  * Linus just yet. Grrr...
  */
 #define set_mb(var, value) do { (var) = (value); mb(); } while (0)
-#define set_wmb(var, value) do { (var) = (value); mb(); } while (0)
 #define safe_halt() ia64_pal_halt_light() /* PAL_HALT_LIGHT */
@@ -336,7 +336,6 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
 #endif
 #define set_mb(var, value) do { xchg(&var, value); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
 #define arch_align_stack(x) (x)
@@ -80,7 +80,6 @@ static inline int irqs_disabled(void)
 #define wmb() barrier()
 #define read_barrier_depends() do { } while(0)
 #define set_mb(var, value) do { xchg(&var, value); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
 #define smp_mb() barrier()
 #define smp_rmb() barrier()
@@ -106,7 +106,6 @@ asmlinkage void resume(void);
 #define wmb() asm volatile ("" : : :"memory")
 #define set_rmb(var, value) do { xchg(&var, value); } while (0)
 #define set_mb(var, value) set_rmb(var, value)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
 #ifdef CONFIG_SMP
 #define smp_mb() mb()
@@ -143,9 +143,6 @@
 #define set_mb(var, value) \
 do { var = value; mb(); } while (0)
-#define set_wmb(var, value) \
-do { var = value; wmb(); } while (0)
 /*
  * switch_to(n) should switch tasks to task nr n, first
  * checking that n isn't the current task, in which case it does nothing.
@@ -143,8 +143,6 @@ static inline void set_eiem(unsigned long val)
 #define read_barrier_depends() do { } while(0)
 #define set_mb(var, value) do { var = value; mb(); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
 #ifndef CONFIG_PA20
 /* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data,
@@ -39,7 +39,6 @@
 #define read_barrier_depends() do { } while(0)
 #define set_mb(var, value) do { var = value; mb(); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
 #ifdef __KERNEL__
 #ifdef CONFIG_SMP
@@ -33,7 +33,6 @@
 #define read_barrier_depends() do { } while(0)
 #define set_mb(var, value) do { var = value; mb(); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
 #ifdef CONFIG_SMP
 #define smp_mb() mb()
@@ -299,7 +299,6 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
 #define set_mb(var, value) do { var = value; mb(); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
 #ifdef __s390x__
@@ -101,7 +101,6 @@ extern void __xchg_called_with_bad_pointer(void);
 #endif
 #define set_mb(var, value) do { xchg(&var, value); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
 /* Interrupt Control */
 static __inline__ void local_irq_enable(void)
@@ -66,7 +66,6 @@ extern void __xchg_called_with_bad_pointer(void);
 #define set_rmb(var, value) do { xchg(&var, value); } while (0)
 #define set_mb(var, value) set_rmb(var, value)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
 /* Interrupt Control */
 #ifndef HARD_CLI
@@ -199,7 +199,6 @@ static inline unsigned long getipl(void)
 #define wmb() mb()
 #define read_barrier_depends() do { } while(0)
 #define set_mb(__var, __value) do { __var = __value; mb(); } while(0)
-#define set_wmb(__var, __value) set_mb(__var, __value)
 #define smp_mb() __asm__ __volatile__("":::"memory")
 #define smp_rmb() __asm__ __volatile__("":::"memory")
 #define smp_wmb() __asm__ __volatile__("":::"memory")
@@ -123,8 +123,6 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
 #define read_barrier_depends() do { } while(0)
 #define set_mb(__var, __value) \
 do { __var = __value; membar_storeload_storestore(); } while(0)
-#define set_wmb(__var, __value) \
-do { __var = __value; wmb(); } while(0)
 #ifdef CONFIG_SMP
 #define smp_mb() mb()
@@ -68,7 +68,6 @@ static inline int irqs_disabled (void)
 #define read_barrier_depends() ((void)0)
 #define set_rmb(var, value) do { xchg (&var, value); } while (0)
 #define set_mb(var, value) set_rmb (var, value)
-#define set_wmb(var, value) do { var = value; wmb (); } while (0)
 #define smp_mb() mb ()
 #define smp_rmb() rmb ()
@@ -240,7 +240,6 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 #endif
 #define read_barrier_depends() do {} while(0)
 #define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
 #define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)
@@ -99,7 +99,6 @@ static inline void disable_coprocessor(int i)
 #endif
 #define set_mb(var, value) do { var = value; mb(); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
 #if !defined (__ASSEMBLY__)