commit febdbfe8 authored by Peter Zijlstra, committed by Ingo Molnar

arch: Prepare for smp_mb__{before,after}_atomic()

Since the smp_mb__{before,after}*() ops are fundamentally dependent on
how an arch can implement atomics, it doesn't make sense to have 3
variants of them. They must all be the same.

Furthermore, the 3 variants suggest they're only valid for those 3
atomic ops, while we have many more where they could be applied.

So move away from
smp_mb__{before,after}_{atomic,clear}_{dec,inc,bit}() and reduce the
interface to just the two: smp_mb__{before,after}_atomic().
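
For illustration, a minimal sketch of what the rename means at a call
site (struct foo, its fields and the helpers around it are hypothetical,
not taken from this patch; smp_mb__{before,after}_atomic() are the names
this series introduces):

	#include <linux/atomic.h>
	#include <linux/bitops.h>

	/* hypothetical example object */
	struct foo {
		atomic_t refcnt;
		unsigned long flags;
	};
	#define FOO_BUSY	0

	static void foo_put(struct foo *f)
	{
		/* was: smp_mb__before_atomic_dec(); */
		smp_mb__before_atomic();	/* order prior stores before the dec */
		atomic_dec(&f->refcnt);
	}

	static void foo_clear_busy(struct foo *f)
	{
		/* was: smp_mb__before_clear_bit() / smp_mb__after_clear_bit(); */
		smp_mb__before_atomic();
		clear_bit(FOO_BUSY, &f->flags);
		smp_mb__after_atomic();	/* make the clear visible before later accesses */
	}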

This patch prepares the way by introducing default implementations in
asm-generic/barrier.h that default to a full barrier and providing
__deprecated inlines for the previous 6 barriers if they're not
provided by the arch.
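
As a sketch of the other side of that default (a hypothetical
arch/foo/include/asm/barrier.h, not part of this patch): an architecture
whose atomic RMW instructions already imply full ordering can override
the asm-generic smp_mb() default with a plain compiler barrier, while
every other architecture silently falls through to the default added
below.

	/* Atomic RMW ops on this (hypothetical) arch are fully ordered,
	 * so only a compiler barrier is needed around them. */
	#define smp_mb__before_atomic()	barrier()
	#define smp_mb__after_atomic()	barrier()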

This should allow for a mostly painless transition (lots of deprecation
warnings in the interim).
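
Concretely, an unconverted call site keeps compiling through the
__deprecated wrappers this patch adds; gcc's deprecated-declaration
diagnostics then point at the remaining users. A hypothetical leftover
caller:

	static void foo_unlock(unsigned long *flags)
	{
		clear_bit(0, flags);
		/* old name: still works via the __deprecated wrapper added
		 * below, but now warns at compile time */
		smp_mb__after_clear_bit();
	}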
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/n/tip-wr59327qdyi9mbzn6x937s4e@git.kernel.org
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: "Chen, Gong" <gong.chen@linux.intel.com>
Cc: John Sullivan <jsrhbz@kanargh.force9.co.uk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mauro Carvalho Chehab <m.chehab@samsung.com>
Cc: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: linux-arch@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 2ab08ee9
@@ -16,6 +16,7 @@
#define __ASM_GENERIC_ATOMIC_H
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#ifdef CONFIG_SMP
/* Force people to define core atomics */
@@ -182,11 +183,5 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
}
#endif
/* Assume that atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#endif /* __KERNEL__ */
#endif /* __ASM_GENERIC_ATOMIC_H */
@@ -62,6 +62,14 @@
#define set_mb(var, value) do { (var) = (value); mb(); } while (0)
#endif
#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic() smp_mb()
#endif
#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic() smp_mb()
#endif
#define smp_store_release(p, v) \
do { \
compiletime_assert_atomic_type(*p); \
@@ -11,14 +11,7 @@
#include <linux/irqflags.h>
#include <linux/compiler.h>
/*
* clear_bit may not imply a memory barrier
*/
#ifndef smp_mb__before_clear_bit
#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() smp_mb()
#endif
#include <asm/barrier.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/ffz.h>
@@ -3,6 +3,42 @@
#define _LINUX_ATOMIC_H
#include <asm/atomic.h>
/*
* Provide __deprecated wrappers for the new interface, avoid flag day changes.
* We need the ugly external functions to break header recursion hell.
*/
#ifndef smp_mb__before_atomic_inc
static inline void __deprecated smp_mb__before_atomic_inc(void)
{
extern void __smp_mb__before_atomic(void);
__smp_mb__before_atomic();
}
#endif
#ifndef smp_mb__after_atomic_inc
static inline void __deprecated smp_mb__after_atomic_inc(void)
{
extern void __smp_mb__after_atomic(void);
__smp_mb__after_atomic();
}
#endif
#ifndef smp_mb__before_atomic_dec
static inline void __deprecated smp_mb__before_atomic_dec(void)
{
extern void __smp_mb__before_atomic(void);
__smp_mb__before_atomic();
}
#endif
#ifndef smp_mb__after_atomic_dec
static inline void __deprecated smp_mb__after_atomic_dec(void)
{
extern void __smp_mb__after_atomic(void);
__smp_mb__after_atomic();
}
#endif
/**
* atomic_add_unless - add unless the number is already a given value
* @v: pointer of type atomic_t
@@ -32,6 +32,26 @@ extern unsigned long __sw_hweight64(__u64 w);
*/
#include <asm/bitops.h>
/*
* Provide __deprecated wrappers for the new interface, avoid flag day changes.
* We need the ugly external functions to break header recursion hell.
*/
#ifndef smp_mb__before_clear_bit
static inline void __deprecated smp_mb__before_clear_bit(void)
{
extern void __smp_mb__before_atomic(void);
__smp_mb__before_atomic();
}
#endif
#ifndef smp_mb__after_clear_bit
static inline void __deprecated smp_mb__after_clear_bit(void)
{
extern void __smp_mb__after_atomic(void);
__smp_mb__after_atomic();
}
#endif
#define for_each_set_bit(bit, addr, size) \
for ((bit) = find_first_bit((addr), (size)); \
(bit) < (size); \
@@ -90,6 +90,22 @@
#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>
#ifdef smp_mb__before_atomic
void __smp_mb__before_atomic(void)
{
smp_mb__before_atomic();
}
EXPORT_SYMBOL(__smp_mb__before_atomic);
#endif
#ifdef smp_mb__after_atomic
void __smp_mb__after_atomic(void)
{
smp_mb__after_atomic();
}
EXPORT_SYMBOL(__smp_mb__after_atomic);
#endif
void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
{
unsigned long delta;