Commit 8b8bc2f7 authored by Jeremy Fitzhardinge, committed by H. Peter Anvin

x86: Use xadd helper more widely

This covers the trivial cases from open-coded xadd to the xadd macros.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Link: http://lkml.kernel.org/r/4E5BCC40.3030501@goop.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Parent 433b3520
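For context, the xadd() helper that this patch switches to was added by the parent commit (433b3520) to the x86 cmpxchg headers. The sketch below is an approximation of that helper, not the verbatim kernel macro (the name xadd_sketch and the exact constraint spelling are illustrative): it picks the operand width from sizeof(*(ptr)), emits a locked XADD, and evaluates to the value the target held before the addition.

/* Hedged sketch of the xadd() helper, not the verbatim kernel definition.
 * LOCK_PREFIX is the kernel's SMP-safe "lock" prefix macro.  The result of
 * the statement expression is the old value of *ptr. */
#define xadd_sketch(ptr, inc)						\
({									\
	__typeof__(*(ptr)) __ret = (inc);				\
	switch (sizeof(*(ptr))) {					\
	case 2:								\
		asm volatile(LOCK_PREFIX "xaddw %w0, %1"		\
			     : "+r" (__ret), "+m" (*(ptr))		\
			     : : "memory", "cc");			\
		break;							\
	case 4:								\
		asm volatile(LOCK_PREFIX "xaddl %0, %1"			\
			     : "+r" (__ret), "+m" (*(ptr))		\
			     : : "memory", "cc");			\
		break;							\
	case 8:								\
		asm volatile(LOCK_PREFIX "xaddq %q0, %1"		\
			     : "+r" (__ret), "+m" (*(ptr))		\
			     : : "memory", "cc");			\
		break;							\
	}								\
	__ret;								\
})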
@@ -172,18 +172,14 @@ static inline int atomic_add_negative(int i, atomic_t *v)
  */
 static inline int atomic_add_return(int i, atomic_t *v)
 {
-	int __i;
 #ifdef CONFIG_M386
+	int __i;
 	unsigned long flags;
 	if (unlikely(boot_cpu_data.x86 <= 3))
 		goto no_xadd;
 #endif
 	/* Modern 486+ processor */
-	__i = i;
-	asm volatile(LOCK_PREFIX "xaddl %0, %1"
-		     : "+r" (i), "+m" (v->counter)
-		     : : "memory");
-	return i + __i;
+	return i + xadd(&v->counter, i);
 
 #ifdef CONFIG_M386
 no_xadd: /* Legacy 386 processor */
...
@@ -170,11 +170,7 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
  */
 static inline long atomic64_add_return(long i, atomic64_t *v)
 {
-	long __i = i;
-	asm volatile(LOCK_PREFIX "xaddq %0, %1;"
-		     : "+r" (i), "+m" (v->counter)
-		     : : "memory");
-	return i + __i;
+	return i + xadd(&v->counter, i);
 }
 
 static inline long atomic64_sub_return(long i, atomic64_t *v)
...
@@ -204,13 +204,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
  */
 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
 {
-	long tmp = delta;
-
-	asm volatile(LOCK_PREFIX "xadd %0,%1"
-		     : "+r" (tmp), "+m" (sem->count)
-		     : : "memory");
-
-	return tmp + delta;
+	return delta + xadd(&sem->count, delta);
 }
 
 #endif /* __KERNEL__ */
...
@@ -656,11 +656,7 @@ static inline int atomic_read_short(const struct atomic_short *v)
  */
 static inline int atom_asr(short i, struct atomic_short *v)
 {
-	short __i = i;
-	asm volatile(LOCK_PREFIX "xaddw %0, %1"
-		     : "+r" (i), "+m" (v->counter)
-		     : : "memory");
-	return i + __i;
+	return i + xadd(&v->counter, i);
 }
 
 /*
...
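All four conversions rely on the same property: XADD leaves the value that was in memory before the addition in its register operand, so adding the increment again reproduces the "new value" that the open-coded versions computed through the saved __i/tmp copy. Below is a standalone userspace illustration of that equivalence; it is not kernel code, the function name is made up, and GCC's __atomic_fetch_add builtin stands in for the kernel's xadd() helper.

#include <assert.h>

/* Illustration only: __atomic_fetch_add(), like xadd(), returns the value
 * the counter held *before* the addition, so "old value + increment" is
 * the post-add value that an add_return-style function must report. */
static int fake_atomic_add_return(int i, int *counter)
{
	return i + __atomic_fetch_add(counter, i, __ATOMIC_SEQ_CST);
}

int main(void)
{
	int v = 40;

	assert(fake_atomic_add_return(2, &v) == 42);	/* returns the new value */
	assert(v == 42);				/* counter was updated */
	return 0;
}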