commit 7c72aaf2 authored by Hugh Dickins, committed by Linus Torvalds

[PATCH] mm: fill arch atomic64 gaps

alpha, sparc64, x86_64 are each missing some primitives from their atomic64
support: fill in the gaps I've noticed by extrapolating asm, following the
groupings in each file.  But powerpc and parisc still lack atomic64.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Andi Kleen <ak@muc.de>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 7ce774b4
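
Every gap filled below is a derived form: once an architecture provides the *_return primitives in asm, the predicates and shorthands reduce to one-line macros, which is what makes extrapolation safe. A minimal user-space sketch of that layering, with C11 atomics standing in for the per-arch asm (this atomic64_t model and function body are illustrative, not the kernel's):

	#include <stdatomic.h>

	typedef struct { _Atomic long counter; } atomic64_t;

	static long atomic64_add_return(long i, atomic64_t *v)
	{
		return atomic_fetch_add(&v->counter, i) + i;	/* new value */
	}

	#define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
	#define atomic64_inc_return(v)		atomic64_add_return(1, (v))
	#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))

	/* The gap macros this patch adds are all one-liners over the above: */
	#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
	#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
	#define atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)

Only the *_add_return primitive needs real asm; everything the patch adds on alpha and sparc64 follows mechanically from it.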
--- a/include/asm-alpha/atomic.h
+++ b/include/asm-alpha/atomic.h
@@ -118,8 +118,6 @@ static __inline__ long atomic_add_return(int i, atomic_t * v)
 	return result;
 }
 
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-
 static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 {
 	long temp, result;
@@ -189,6 +187,9 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 })
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
+#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
+#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
+
 #define atomic_dec_return(v) atomic_sub_return(1,(v))
 #define atomic64_dec_return(v) atomic64_sub_return(1,(v))
 
@@ -199,6 +200,8 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 #define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
 
 #define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
+#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)
+
 #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
 #define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
--- a/include/asm-sparc64/atomic.h
+++ b/include/asm-sparc64/atomic.h
@@ -54,6 +54,7 @@ extern int atomic64_sub_ret(int, atomic64_t *);
  * other cases.
  */
 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
 
 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
 #define atomic64_sub_and_test(i, v) (atomic64_sub_ret(i, v) == 0)
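
For the boundary behaviour those == 0 tests rely on, a tiny hypothetical harness (user-space C11 atomics again, not kernel code):

	#include <assert.h>
	#include <stdatomic.h>
	#include <stdio.h>

	typedef struct { _Atomic long counter; } atomic64_t;

	static long atomic64_inc_return(atomic64_t *v)
	{
		return atomic_fetch_add(&v->counter, 1) + 1;
	}

	#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)

	int main(void)
	{
		atomic64_t v = { -1 };

		assert(atomic64_inc_and_test(&v));	/* -1 -> 0: true  */
		assert(!atomic64_inc_and_test(&v));	/*  0 -> 1: false */
		printf("counter is now %ld\n", atomic_load(&v.counter));
		return 0;
	}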
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -160,8 +160,8 @@ static __inline__ int atomic_inc_and_test(atomic_t *v)
 
 /**
  * atomic_add_negative - add and test if negative
- * @v: pointer of type atomic_t
  * @i: integer value to add
+ * @v: pointer of type atomic_t
  *
  * Atomically adds @i to @v and returns true
  * if the result is negative, or false when
@@ -178,6 +178,31 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v)
 	return c;
 }
 
+/**
+ * atomic_add_return - add and return
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns @i + @v
+ */
+static __inline__ int atomic_add_return(int i, atomic_t *v)
+{
+	int __i = i;
+	__asm__ __volatile__(
+		LOCK "xaddl %0, %1;"
+		:"=r"(i)
+		:"m"(v->counter), "0"(i));
+	return i + __i;
+}
+
+static __inline__ int atomic_sub_return(int i, atomic_t *v)
+{
+	return atomic_add_return(-i,v);
+}
+
+#define atomic_inc_return(v)  (atomic_add_return(1,v))
+#define atomic_dec_return(v)  (atomic_sub_return(1,v))
+
 /* An 64bit atomic type */
 
 typedef struct { volatile long counter; } atomic64_t;
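
The atomic_add_return() moved up here relies on the xadd idiom: lock xaddl exchanges the register with the old memory value while adding, so the asm leaves the value from before the addition in i, and re-adding the saved increment __i yields the new value. A standalone sketch of the same trick, with a literal "lock;" prefix standing in for the kernel's LOCK macro (which compiles away on non-SMP builds):

	/* User-space illustration of the xadd idiom: "lock xaddl" stores
	 * the *old* value of the memory operand in the register, so the
	 * wrapper re-adds the saved increment to hand back the new value.
	 */
	static inline int xadd_add_return(int i, volatile int *counter)
	{
		int __i = i;		/* remember the increment */

		__asm__ __volatile__(
			"lock; xaddl %0, %1"
			: "+r" (i), "+m" (*counter)
			: : "memory");
		return i + __i;		/* old value + increment = new value */
	}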
@@ -320,14 +345,14 @@ static __inline__ int atomic64_inc_and_test(atomic64_t *v)
 
 /**
  * atomic64_add_negative - add and test if negative
- * @v: pointer to atomic64_t
  * @i: integer value to add
+ * @v: pointer to type atomic64_t
  *
  * Atomically adds @i to @v and returns true
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-static __inline__ long atomic64_add_negative(long i, atomic64_t *v)
+static __inline__ int atomic64_add_negative(long i, atomic64_t *v)
 {
 	unsigned char c;
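
The body elided in this hunk follows the sets-flag idiom: lock addq updates the counter and sets SF from the 64-bit result, and sets copies that sign flag into a byte, hence the unsigned char c and the int return type the hunk switches to. A user-space sketch of the pattern (constraints simplified; the kernel operates on v->counter through its LOCK macro):

	/* Sketch of the sets-flag pattern behind atomic64_add_negative():
	 * "lock addq" sets SF from the 64-bit result, "sets" captures the
	 * sign flag in a byte, and the wrapper widens it to int.
	 */
	static inline int add_negative64(long i, volatile long *counter)
	{
		unsigned char c;

		__asm__ __volatile__(
			"lock; addq %2,%0; sets %1"
			: "+m" (*counter), "=qm" (c)
			: "er" (i)
			: "memory");
		return c;	/* nonzero iff the new value is negative */
	}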
@@ -339,27 +364,30 @@ static __inline__ long atomic64_add_negative(long i, atomic64_t *v)
 }
 
 /**
- * atomic_add_return - add and return
- * @v: pointer of type atomic_t
+ * atomic64_add_return - add and return
  * @i: integer value to add
+ * @v: pointer to type atomic64_t
  *
  * Atomically adds @i to @v and returns @i + @v
  */
-static __inline__ int atomic_add_return(int i, atomic_t *v)
+static __inline__ long atomic64_add_return(long i, atomic64_t *v)
 {
-	int __i = i;
+	long __i = i;
 	__asm__ __volatile__(
-		LOCK "xaddl %0, %1;"
+		LOCK "xaddq %0, %1;"
 		:"=r"(i)
 		:"m"(v->counter), "0"(i));
 	return i + __i;
 }
 
-static __inline__ int atomic_sub_return(int i, atomic_t *v)
+static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
 {
-	return atomic_add_return(-i,v);
+	return atomic64_add_return(-i,v);
 }
 
+#define atomic64_inc_return(v)  (atomic64_add_return(1,v))
+#define atomic64_dec_return(v)  (atomic64_sub_return(1,v))
+
 #define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
 
 /**
@@ -381,9 +409,6 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
 })
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
-#define atomic_inc_return(v)  (atomic_add_return(1,v))
-#define atomic_dec_return(v)  (atomic_sub_return(1,v))
-
 /* These are x86-specific, used by some header files */
 #define atomic_clear_mask(mask, addr) \
 __asm__ __volatile__(LOCK "andl %0,%1" \
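
The atomic_add_unless() behind the surviving atomic_inc_not_zero() line is a cmpxchg retry loop over the atomic_cmpxchg() seen above. A user-space sketch of that loop, with GCC's __sync_val_compare_and_swap standing in for the kernel's LOCK'd cmpxchg (the function name is illustrative):

	/* add a to the counter unless it currently equals u; returns
	 * nonzero if the add happened.
	 */
	static int add_unless_sketch(volatile int *counter, int a, int u)
	{
		int c = *counter;

		while (c != u) {
			int old = __sync_val_compare_and_swap(counter, c, c + a);
			if (old == c)
				return 1;	/* swap won: the add was applied */
			c = old;		/* lost a race: retry with fresh value */
		}
		return 0;			/* counter was u: nothing done */
	}

atomic_inc_not_zero(v) is then atomic_add_unless(v, 1, 0); the two defines removed in this last hunk are the duplicates of the atomic_inc_return()/atomic_dec_return() pair moved up beside atomic_sub_return().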