Commit 25547b6b authored by: Ingo Molnar

Merge branch 'WIP.locking/atomics' into locking/core

Merge two uncontroversial cleanups from this branch while the rest is being reworked.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/include/asm/atomic.h
......
@@ -197,35 +197,56 @@ static inline int atomic_xchg(atomic_t *v, int new)
 	return xchg(&v->counter, new);
 }
 
-#define ATOMIC_OP(op)						\
-static inline void atomic_##op(int i, atomic_t *v)		\
-{								\
-	asm volatile(LOCK_PREFIX #op"l %1,%0"			\
-			: "+m" (v->counter)			\
-			: "ir" (i)				\
-			: "memory");				\
-}
-
-#define ATOMIC_FETCH_OP(op, c_op)				\
-static inline int atomic_fetch_##op(int i, atomic_t *v)	\
-{								\
-	int val = atomic_read(v);				\
-	do {							\
-	} while (!atomic_try_cmpxchg(v, &val, val c_op i));	\
-	return val;						\
-}
-
-#define ATOMIC_OPS(op, c_op)					\
-	ATOMIC_OP(op)						\
-	ATOMIC_FETCH_OP(op, c_op)
-
-ATOMIC_OPS(and, &)
-ATOMIC_OPS(or , |)
-ATOMIC_OPS(xor, ^)
-
-#undef ATOMIC_OPS
-#undef ATOMIC_FETCH_OP
-#undef ATOMIC_OP
+static inline void atomic_and(int i, atomic_t *v)
+{
+	asm volatile(LOCK_PREFIX "andl %1,%0"
+			: "+m" (v->counter)
+			: "ir" (i)
+			: "memory");
+}
+
+static inline int atomic_fetch_and(int i, atomic_t *v)
+{
+	int val = atomic_read(v);
+
+	do { } while (!atomic_try_cmpxchg(v, &val, val & i));
+
+	return val;
+}
+
+static inline void atomic_or(int i, atomic_t *v)
+{
+	asm volatile(LOCK_PREFIX "orl %1,%0"
+			: "+m" (v->counter)
+			: "ir" (i)
+			: "memory");
+}
+
+static inline int atomic_fetch_or(int i, atomic_t *v)
+{
+	int val = atomic_read(v);
+
+	do { } while (!atomic_try_cmpxchg(v, &val, val | i));
+
+	return val;
+}
+
+static inline void atomic_xor(int i, atomic_t *v)
+{
+	asm volatile(LOCK_PREFIX "xorl %1,%0"
+			: "+m" (v->counter)
+			: "ir" (i)
+			: "memory");
+}
+
+static inline int atomic_fetch_xor(int i, atomic_t *v)
+{
+	int val = atomic_read(v);
+
+	do { } while (!atomic_try_cmpxchg(v, &val, val ^ i));
+
+	return val;
+}
 
 /**
  * __atomic_add_unless - add unless the number is already a given value
......
@@ -239,10 +260,12 @@ ATOMIC_OPS(xor, ^)
 static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c = atomic_read(v);
+
 	do {
 		if (unlikely(c == u))
 			break;
 	} while (!atomic_try_cmpxchg(v, &c, c + a));
+
 	return c;
 }
......
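Note on the pattern above (not part of the commit): each new atomic_fetch_*() routine is the same atomic_try_cmpxchg() retry loop, where a failed compare-and-exchange refreshes val with the value actually found in the counter. A rough userspace sketch of that loop, assuming the GCC/Clang __atomic builtins as a stand-in for the kernel's atomic_try_cmpxchg() (the helper name fetch_or_sketch is made up for illustration):

#include <stdbool.h>
#include <stdio.h>

/* Userspace illustration only: keep the last observed value in 'val';
 * on failure __atomic_compare_exchange_n() writes the observed value back
 * into 'val', just as atomic_try_cmpxchg() updates its 'old' argument. */
static int fetch_or_sketch(int *counter, int mask)
{
        int val = __atomic_load_n(counter, __ATOMIC_RELAXED);

        do {
        } while (!__atomic_compare_exchange_n(counter, &val, val | mask,
                                              false, __ATOMIC_SEQ_CST,
                                              __ATOMIC_RELAXED));
        return val;     /* value seen right before the OR took effect */
}

int main(void)
{
        int counter = 0x3;
        int old = fetch_or_sketch(&counter, 0xc);

        printf("old=0x%x new=0x%x\n", old, counter);    /* old=0x3 new=0xf */
        return 0;
}

The value returned is the one held by the counter immediately before the successful exchange, which is exactly the "fetch" half of the fetch-or semantics.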
arch/x86/include/asm/atomic64_32.h
......
@@ -312,37 +312,70 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
 #undef alternative_atomic64
 #undef __alternative_atomic64
 
-#define ATOMIC64_OP(op, c_op)					\
-static inline void atomic64_##op(long long i, atomic64_t *v)	\
-{								\
-	long long old, c = 0;					\
-	while ((old = atomic64_cmpxchg(v, c, c c_op i)) != c)	\
-		c = old;					\
-}
-
-#define ATOMIC64_FETCH_OP(op, c_op)				\
-static inline long long atomic64_fetch_##op(long long i, atomic64_t *v) \
-{								\
-	long long old, c = 0;					\
-	while ((old = atomic64_cmpxchg(v, c, c c_op i)) != c)	\
-		c = old;					\
-	return old;						\
-}
-
-ATOMIC64_FETCH_OP(add, +)
-
-#define atomic64_fetch_sub(i, v)	atomic64_fetch_add(-(i), (v))
-
-#define ATOMIC64_OPS(op, c_op)					\
-	ATOMIC64_OP(op, c_op)					\
-	ATOMIC64_FETCH_OP(op, c_op)
-
-ATOMIC64_OPS(and, &)
-ATOMIC64_OPS(or, |)
-ATOMIC64_OPS(xor, ^)
-
-#undef ATOMIC64_OPS
-#undef ATOMIC64_FETCH_OP
-#undef ATOMIC64_OP
+static inline void atomic64_and(long long i, atomic64_t *v)
+{
+	long long old, c = 0;
+
+	while ((old = atomic64_cmpxchg(v, c, c & i)) != c)
+		c = old;
+}
+
+static inline long long atomic64_fetch_and(long long i, atomic64_t *v)
+{
+	long long old, c = 0;
+
+	while ((old = atomic64_cmpxchg(v, c, c & i)) != c)
+		c = old;
+
+	return old;
+}
+
+static inline void atomic64_or(long long i, atomic64_t *v)
+{
+	long long old, c = 0;
+
+	while ((old = atomic64_cmpxchg(v, c, c | i)) != c)
+		c = old;
+}
+
+static inline long long atomic64_fetch_or(long long i, atomic64_t *v)
+{
+	long long old, c = 0;
+
+	while ((old = atomic64_cmpxchg(v, c, c | i)) != c)
+		c = old;
+
+	return old;
+}
+
+static inline void atomic64_xor(long long i, atomic64_t *v)
+{
+	long long old, c = 0;
+
+	while ((old = atomic64_cmpxchg(v, c, c ^ i)) != c)
+		c = old;
+}
+
+static inline long long atomic64_fetch_xor(long long i, atomic64_t *v)
+{
+	long long old, c = 0;
+
+	while ((old = atomic64_cmpxchg(v, c, c ^ i)) != c)
+		c = old;
+
+	return old;
+}
+
+static inline long long atomic64_fetch_add(long long i, atomic64_t *v)
+{
+	long long old, c = 0;
+
+	while ((old = atomic64_cmpxchg(v, c, c + i)) != c)
+		c = old;
+
+	return old;
+}
+
+#define atomic64_fetch_sub(i, v)	atomic64_fetch_add(-(i), (v))
 
 #endif /* _ASM_X86_ATOMIC64_32_H */
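Aside (not from the commit): 32-bit x86 has no 64-bit LOCK-prefixed and/or/xor to lean on, so these atomic64_*() helpers emulate the operation with an atomic64_cmpxchg() loop: guess c (starting at 0), and if the cmpxchg reports a different value, retry with the value it observed. A minimal userspace sketch of the same loop, assuming the GCC/Clang __atomic_compare_exchange_n() builtin stands in for the kernel's cmpxchg8b-based atomic64_cmpxchg() (cmpxchg64_sketch and or64_sketch are illustrative names only):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for atomic64_cmpxchg(): store 'new_val' only if *p == old and
 * always return the value actually observed in *p (on failure the builtin
 * writes the observed value back into 'old'). */
static int64_t cmpxchg64_sketch(int64_t *p, int64_t old, int64_t new_val)
{
        __atomic_compare_exchange_n(p, &old, new_val, false,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
        return old;
}

/* Same shape as the 32-bit atomic64_or() above: guess c (starting at 0)
 * and retry with whatever value the cmpxchg reported until it lands. */
static void or64_sketch(int64_t *p, int64_t mask)
{
        int64_t old, c = 0;

        while ((old = cmpxchg64_sketch(p, c, c | mask)) != c)
                c = old;
}

int main(void)
{
        int64_t v = 0x10;

        or64_sketch(&v, 0x1);
        printf("v=0x%llx\n", (unsigned long long)v);    /* prints v=0x11 */
        return 0;
}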
arch/x86/include/asm/atomic64_64.h
......
@@ -177,7 +177,7 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
 }
 
 #define atomic64_try_cmpxchg atomic64_try_cmpxchg
-static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, long *old, long new)
+static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, long new)
 {
 	return try_cmpxchg(&v->counter, old, new);
 }
......
@@ -198,7 +198,7 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
  */
 static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
 {
-	long c = atomic64_read(v);
+	s64 c = atomic64_read(v);
 	do {
 		if (unlikely(c == u))
 			return false;
......
@@ -217,7 +217,7 @@ static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
  */
 static inline long atomic64_dec_if_positive(atomic64_t *v)
 {
-	long dec, c = atomic64_read(v);
+	s64 dec, c = atomic64_read(v);
 	do {
 		dec = c - 1;
 		if (unlikely(dec < 0))
......
@@ -226,34 +226,55 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
 	return dec;
 }
 
-#define ATOMIC64_OP(op)						\
-static inline void atomic64_##op(long i, atomic64_t *v)	\
-{								\
-	asm volatile(LOCK_PREFIX #op"q %1,%0"			\
-			: "+m" (v->counter)			\
-			: "er" (i)				\
-			: "memory");				\
-}
-
-#define ATOMIC64_FETCH_OP(op, c_op)				\
-static inline long atomic64_fetch_##op(long i, atomic64_t *v)	\
-{								\
-	long val = atomic64_read(v);				\
-	do {							\
-	} while (!atomic64_try_cmpxchg(v, &val, val c_op i));	\
-	return val;						\
-}
-
-#define ATOMIC64_OPS(op, c_op)					\
-	ATOMIC64_OP(op)						\
-	ATOMIC64_FETCH_OP(op, c_op)
-
-ATOMIC64_OPS(and, &)
-ATOMIC64_OPS(or, |)
-ATOMIC64_OPS(xor, ^)
-
-#undef ATOMIC64_OPS
-#undef ATOMIC64_FETCH_OP
-#undef ATOMIC64_OP
+static inline void atomic64_and(long i, atomic64_t *v)
+{
+	asm volatile(LOCK_PREFIX "andq %1,%0"
+			: "+m" (v->counter)
+			: "er" (i)
+			: "memory");
+}
+
+static inline long atomic64_fetch_and(long i, atomic64_t *v)
+{
+	s64 val = atomic64_read(v);
+
+	do {
+	} while (!atomic64_try_cmpxchg(v, &val, val & i));
+
+	return val;
+}
+
+static inline void atomic64_or(long i, atomic64_t *v)
+{
+	asm volatile(LOCK_PREFIX "orq %1,%0"
+			: "+m" (v->counter)
+			: "er" (i)
+			: "memory");
+}
+
+static inline long atomic64_fetch_or(long i, atomic64_t *v)
+{
+	s64 val = atomic64_read(v);
+
+	do {
+	} while (!atomic64_try_cmpxchg(v, &val, val | i));
+
+	return val;
+}
+
+static inline void atomic64_xor(long i, atomic64_t *v)
+{
+	asm volatile(LOCK_PREFIX "xorq %1,%0"
+			: "+m" (v->counter)
+			: "er" (i)
+			: "memory");
+}
+
+static inline long atomic64_fetch_xor(long i, atomic64_t *v)
+{
+	s64 val = atomic64_read(v);
+
+	do {
+	} while (!atomic64_try_cmpxchg(v, &val, val ^ i));
+
+	return val;
+}
 
 #endif /* _ASM_X86_ATOMIC64_64_H */
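The second cleanup folded into this merge is visible in the hunks above: the local variables and the 'old' pointer move from long to s64. On x86-64 both are 64 bits wide, so the generated code should not change; as I read it, the point is type consistency, since the kernel's s64 is 'long long', which the compiler treats as distinct from 'long', and callers that hold their value in an s64 could not otherwise pass a pointer to it cleanly. A small sketch of that distinction (plain userspace C, with s64 typedef'd locally; the aliasing liberty at the end mirrors the one the kernel itself takes here):

#include <stdio.h>

typedef long long s64; /* the kernel's s64 is 'long long', 64-bit here like 'long' */

static long load_via_long_ptr(long *p)
{
        return *p;
}

int main(void)
{
        s64 v = 42;

        /* Same width and representation on x86-64 ... */
        printf("sizeof(long)=%zu sizeof(s64)=%zu\n", sizeof(long), sizeof(s64));

        /* ... but distinct types: passing &v without a cast would be an
         * incompatible-pointer-type error, which is why __raw_try_cmpxchg()
         * gains an explicit cast in the next hunk. */
        printf("read through long *: %ld\n", load_via_long_ptr((long *)&v));
        return 0;
}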
arch/x86/include/asm/cmpxchg.h
......
@@ -157,7 +157,7 @@ extern void __add_wrong_size(void)
 #define __raw_try_cmpxchg(_ptr, _pold, _new, size, lock)	\
 ({								\
 	bool success;						\
-	__typeof__(_ptr) _old = (_pold);			\
+	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);	\
 	__typeof__(*(_ptr)) __old = *_old;			\
 	__typeof__(*(_ptr)) __new = (_new);			\
 	switch (size) {						\
......
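The cmpxchg.h hunk is the companion piece: once atomic64_try_cmpxchg() hands __raw_try_cmpxchg() an s64 * while the counter itself is a long, the macro has to cast the incoming pointer back to __typeof__(_ptr) before dereferencing it. A hedged userspace imitation of that trick, with try_cmpxchg_sketch as a made-up name and the GCC/Clang __atomic_compare_exchange_n() builtin in place of the LOCK'ed cmpxchg asm:

#include <stdbool.h>
#include <stdio.h>

/* try_cmpxchg-style macro: on failure the observed value is written back
 * through _pold. The __typeof__(_ptr) cast lets a caller hand in a pointer
 * of a same-width but differently spelled type (long long * vs long *),
 * mirroring s64 * vs long * in the hunk above. */
#define try_cmpxchg_sketch(_ptr, _pold, _new)                           \
({                                                                      \
        __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);             \
        __atomic_compare_exchange_n((_ptr), _old, (_new), false,       \
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);\
})

int main(void)
{
        long counter = 5;
        long long old = 5;      /* plays the role of the kernel's s64 */
        bool ok = try_cmpxchg_sketch(&counter, &old, 6L);

        printf("ok=%d counter=%ld old=%lld\n", ok, counter, old);
        return 0;
}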