Commit 560cb12a authored by Peter Zijlstra, committed by Ingo Molnar

locking,arch: Rewrite generic atomic support

Rewrite generic atomic support to only require cmpxchg(), generate all
other primitives from that.

Furthermore, reduce the endless repetition for all these primitives to
a few CPP macros. This way we get more for fewer lines.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20140508135852.940119622@infradead.org
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: David Howells <dhowells@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-arch@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent d4608dd5
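The core trick of the rewrite, deriving a full read-modify-write primitive from nothing but a compare-and-swap loop, can be sketched outside the kernel. The snippet below is a minimal user-space analogue, not the kernel code: it assumes GCC/Clang `__atomic` built-ins in place of the kernel's cmpxchg(), and the `my_atomic_t` / `MY_ATOMIC_OP_RETURN` names are illustrative only.

```c
/* User-space analogue of the ATOMIC_OP_RETURN() pattern introduced below.
 * Assumptions: GCC/Clang __atomic built-ins stand in for the kernel's
 * cmpxchg(); my_atomic_t and MY_ATOMIC_OP_RETURN are illustrative names. */
#include <stdio.h>

typedef struct { int counter; } my_atomic_t;

/* Generate "apply op and return the new value" from a CAS loop: read the
 * counter, try to swap in the updated value, retry until the swap wins. */
#define MY_ATOMIC_OP_RETURN(op, c_op)					\
static inline int my_atomic_##op##_return(int i, my_atomic_t *v)	\
{									\
	int c = __atomic_load_n(&v->counter, __ATOMIC_RELAXED);	\
	while (!__atomic_compare_exchange_n(&v->counter, &c, c c_op i,	\
					    0, __ATOMIC_SEQ_CST,	\
					    __ATOMIC_RELAXED))		\
		; /* on failure, c is reloaded with the current value */ \
	return c c_op i;						\
}

MY_ATOMIC_OP_RETURN(add, +)
MY_ATOMIC_OP_RETURN(sub, -)

int main(void)
{
	my_atomic_t v = { .counter = 40 };

	printf("%d\n", my_atomic_add_return(2, &v));	/* 42 */
	printf("%d\n", my_atomic_sub_return(40, &v));	/* 2 */
	return 0;
}
```

The same two-macro shape (one for `void` ops, one for the value-returning ops) is what the patch instantiates for add/sub and, where needed, and/or.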
@@ -18,14 +18,100 @@
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
 
+/*
+ * atomic_$op() - $op integer to atomic variable
+ * @i: integer value to $op
+ * @v: pointer to the atomic variable
+ *
+ * Atomically $ops @i to @v. Does not strictly guarantee a memory-barrier, use
+ * smp_mb__{before,after}_atomic().
+ */
+
+/*
+ * atomic_$op_return() - $op integer to atomic variable and returns the result
+ * @i: integer value to $op
+ * @v: pointer to the atomic variable
+ *
+ * Atomically $ops @i to @v. Does imply a full memory barrier.
+ */
+
 #ifdef CONFIG_SMP
-/* Force people to define core atomics */
-# if !defined(atomic_add_return) || !defined(atomic_sub_return) || \
-     !defined(atomic_clear_mask) || !defined(atomic_set_mask)
-#  error "SMP requires a little arch-specific magic"
-# endif
+
+/* we can build all atomic primitives from cmpxchg */
+
+#define ATOMIC_OP(op, c_op)						\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	int c, old;							\
+									\
+	c = v->counter;							\
+	while ((old = cmpxchg(&v->counter, c, c c_op i)) != c)		\
+		c = old;						\
+}
+
+#define ATOMIC_OP_RETURN(op, c_op)					\
+static inline int atomic_##op##_return(int i, atomic_t *v)		\
+{									\
+	int c, old;							\
+									\
+	c = v->counter;							\
+	while ((old = cmpxchg(&v->counter, c, c c_op i)) != c)		\
+		c = old;						\
+									\
+	return c c_op i;						\
+}
+
+#else
+
+#include <linux/irqflags.h>
+
+#define ATOMIC_OP(op, c_op)						\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	unsigned long flags;						\
+									\
+	raw_local_irq_save(flags);					\
+	v->counter = v->counter c_op i;					\
+	raw_local_irq_restore(flags);					\
+}
+
+#define ATOMIC_OP_RETURN(op, c_op)					\
+static inline int atomic_##op##_return(int i, atomic_t *v)		\
+{									\
+	unsigned long flags;						\
+	int ret;							\
+									\
+	raw_local_irq_save(flags);					\
+	ret = (v->counter = v->counter c_op i);				\
+	raw_local_irq_restore(flags);					\
+									\
+	return ret;							\
+}
+
+#endif /* CONFIG_SMP */
+
+#ifndef atomic_add_return
+ATOMIC_OP_RETURN(add, +)
+#endif
+
+#ifndef atomic_sub_return
+ATOMIC_OP_RETURN(sub, -)
+#endif
+
+#ifndef atomic_clear_mask
+ATOMIC_OP(and, &)
+#define atomic_clear_mask(i, v) atomic_and(~(i), (v))
+#endif
+
+#ifndef atomic_set_mask
+#define CONFIG_ARCH_HAS_ATOMIC_OR
+ATOMIC_OP(or, |)
+#define atomic_set_mask(i, v)	atomic_or((i), (v))
+#endif
+
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
 /*
  * Atomic operations that C can't guarantee us.  Useful for
  * resource counting etc..
@@ -33,8 +119,6 @@
 #define ATOMIC_INIT(i)	{ (i) }
 
-#ifdef __KERNEL__
-
 /**
  * atomic_read - read atomic variable
  * @v: pointer of type atomic_t
@@ -56,52 +140,6 @@
 #include <linux/irqflags.h>
 
-/**
- * atomic_add_return - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v and returns the result
- */
-#ifndef atomic_add_return
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	unsigned long flags;
-	int temp;
-
-	raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
-	temp = v->counter;
-	temp += i;
-	v->counter = temp;
-	raw_local_irq_restore(flags);
-
-	return temp;
-}
-#endif
-
-/**
- * atomic_sub_return - subtract integer from atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns the result
- */
-#ifndef atomic_sub_return
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	unsigned long flags;
-	int temp;
-
-	raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
-	temp = v->counter;
-	temp -= i;
-	v->counter = temp;
-	raw_local_irq_restore(flags);
-
-	return temp;
-}
-#endif
-
 static inline int atomic_add_negative(int i, atomic_t *v)
 {
 	return atomic_add_return(i, v) < 0;
@@ -146,42 +184,4 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 	return c;
 }
 
-/**
- * atomic_clear_mask - Atomically clear bits in atomic variable
- * @mask: Mask of the bits to be cleared
- * @v: pointer of type atomic_t
- *
- * Atomically clears the bits set in @mask from @v
- */
-#ifndef atomic_clear_mask
-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
-{
-	unsigned long flags;
-
-	mask = ~mask;
-	raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */
-	v->counter &= mask;
-	raw_local_irq_restore(flags);
-}
-#endif
-
-/**
- * atomic_set_mask - Atomically set bits in atomic variable
- * @mask: Mask of the bits to be set
- * @v: pointer of type atomic_t
- *
- * Atomically sets the bits set in @mask in @v
- */
-#ifndef atomic_set_mask
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-	unsigned long flags;
-
-	raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */
-	v->counter |= mask;
-	raw_local_irq_restore(flags);
-}
-#endif
-
-#endif /* __KERNEL__ */
-
 #endif /* __ASM_GENERIC_ATOMIC_H */
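As the hunk above shows, the legacy mask interfaces become thin wrappers: clearing the bits in a mask is an AND with its complement, and setting them is an OR. A tiny non-kernel sanity check of those identities (`my_and`/`my_or` are illustrative stand-ins, not kernel API):

```c
#include <assert.h>
#include <stdio.h>

/* Plain, non-atomic stand-ins for atomic_and()/atomic_or(), just to check
 * the mask identities used by atomic_clear_mask()/atomic_set_mask(). */
static void my_and(unsigned int i, unsigned int *v) { *v &= i; }
static void my_or (unsigned int i, unsigned int *v) { *v |= i; }

int main(void)
{
	unsigned int v = 0xf0f0u;

	my_and(~0x00f0u, &v);	/* atomic_clear_mask(0x00f0, v): clear those bits */
	assert(v == 0xf000u);

	my_or(0x000fu, &v);	/* atomic_set_mask(0x000f, v): set those bits */
	assert(v == 0xf00fu);

	printf("0x%04x\n", v);	/* 0xf00f */
	return 0;
}
```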
@@ -20,10 +20,22 @@ typedef struct {
 extern long long atomic64_read(const atomic64_t *v);
 extern void	 atomic64_set(atomic64_t *v, long long i);
-extern void	 atomic64_add(long long a, atomic64_t *v);
-extern long long atomic64_add_return(long long a, atomic64_t *v);
-extern void	 atomic64_sub(long long a, atomic64_t *v);
-extern long long atomic64_sub_return(long long a, atomic64_t *v);
+
+#define ATOMIC64_OP(op)							\
+ extern void	 atomic64_##op(long long a, atomic64_t *v);
+
+#define ATOMIC64_OP_RETURN(op)						\
+ extern long long atomic64_##op##_return(long long a, atomic64_t *v);
+
+#define ATOMIC64_OPS(op)	ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
+
+ATOMIC64_OPS(add)
+ATOMIC64_OPS(sub)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
+
 extern long long atomic64_dec_if_positive(atomic64_t *v);
 extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n);
 extern long long atomic64_xchg(atomic64_t *v, long long new);
......
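For reference, `ATOMIC64_OPS(add)` in the header hunk above expands to exactly the pair of declarations the hand-written version spelled out; `ATOMIC64_OPS(sub)` does the same for the sub variants.

```c
/* Expansion of ATOMIC64_OPS(add) after ATOMIC64_OP()/ATOMIC64_OP_RETURN()
 * are substituted -- i.e. the removed declarations, regenerated: */
extern void atomic64_add(long long a, atomic64_t *v);
extern long long atomic64_add_return(long long a, atomic64_t *v);
```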
@@ -70,53 +70,42 @@ void atomic64_set(atomic64_t *v, long long i)
 }
 EXPORT_SYMBOL(atomic64_set);
 
-void atomic64_add(long long a, atomic64_t *v)
-{
-	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
-
-	raw_spin_lock_irqsave(lock, flags);
-	v->counter += a;
-	raw_spin_unlock_irqrestore(lock, flags);
-}
-EXPORT_SYMBOL(atomic64_add);
-
-long long atomic64_add_return(long long a, atomic64_t *v)
-{
-	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
-	long long val;
-
-	raw_spin_lock_irqsave(lock, flags);
-	val = v->counter += a;
-	raw_spin_unlock_irqrestore(lock, flags);
-	return val;
-}
-EXPORT_SYMBOL(atomic64_add_return);
-
-void atomic64_sub(long long a, atomic64_t *v)
-{
-	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
-
-	raw_spin_lock_irqsave(lock, flags);
-	v->counter -= a;
-	raw_spin_unlock_irqrestore(lock, flags);
-}
-EXPORT_SYMBOL(atomic64_sub);
-
-long long atomic64_sub_return(long long a, atomic64_t *v)
-{
-	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
-	long long val;
-
-	raw_spin_lock_irqsave(lock, flags);
-	val = v->counter -= a;
-	raw_spin_unlock_irqrestore(lock, flags);
-	return val;
-}
-EXPORT_SYMBOL(atomic64_sub_return);
+#define ATOMIC64_OP(op, c_op)						\
+void atomic64_##op(long long a, atomic64_t *v)				\
+{									\
+	unsigned long flags;						\
+	raw_spinlock_t *lock = lock_addr(v);				\
+									\
+	raw_spin_lock_irqsave(lock, flags);				\
+	v->counter c_op a;						\
+	raw_spin_unlock_irqrestore(lock, flags);			\
+}									\
+EXPORT_SYMBOL(atomic64_##op);
+
+#define ATOMIC64_OP_RETURN(op, c_op)					\
+long long atomic64_##op##_return(long long a, atomic64_t *v)		\
+{									\
+	unsigned long flags;						\
+	raw_spinlock_t *lock = lock_addr(v);				\
+	long long val;							\
+									\
+	raw_spin_lock_irqsave(lock, flags);				\
+	val = (v->counter c_op a);					\
+	raw_spin_unlock_irqrestore(lock, flags);			\
+	return val;							\
+}									\
+EXPORT_SYMBOL(atomic64_##op##_return);
+
+#define ATOMIC64_OPS(op, c_op)						\
+	ATOMIC64_OP(op, c_op)						\
+	ATOMIC64_OP_RETURN(op, c_op)
+
+ATOMIC64_OPS(add, +=)
+ATOMIC64_OPS(sub, -=)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
 
 long long atomic64_dec_if_positive(atomic64_t *v)
 {
......
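The lib/atomic64.c side keeps its existing lock-based implementation (a set of raw spinlocks selected per-variable by lock_addr()); the macros only fold away the four near-identical function bodies. Below is a rough user-space analogue of that pattern, with a single pthread mutex standing in for the kernel's hashed spinlocks and the `my_*` names purely illustrative.

```c
#include <pthread.h>
#include <stdio.h>

/* User-space analogue of lib/atomic64.c's ATOMIC64_OP_RETURN(): every
 * 64-bit op takes a lock, applies c_op, and returns the new value.
 * One global mutex stands in for the kernel's per-hash-bucket spinlocks. */
typedef struct { long long counter; } my_atomic64_t;

static pthread_mutex_t my_atomic64_lock = PTHREAD_MUTEX_INITIALIZER;

#define MY_ATOMIC64_OP_RETURN(op, c_op)					   \
static long long my_atomic64_##op##_return(long long a, my_atomic64_t *v) \
{									   \
	long long val;							   \
									   \
	pthread_mutex_lock(&my_atomic64_lock);				   \
	val = (v->counter c_op a);					   \
	pthread_mutex_unlock(&my_atomic64_lock);			   \
	return val;							   \
}

MY_ATOMIC64_OP_RETURN(add, +=)
MY_ATOMIC64_OP_RETURN(sub, -=)

int main(void)
{
	my_atomic64_t v = { .counter = 1LL << 32 };

	printf("%lld\n", my_atomic64_add_return(10, &v));
	printf("%lld\n", my_atomic64_sub_return(1LL << 32, &v));	/* 10 */
	return 0;
}
```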