Commit 8896fab3 authored by Jan Beulich, committed by Linus Torvalds

[PATCH] x86: cmpxchg improvements

This adjusts i386's cmpxchg patterns so that

- for word and long cmpxchg-es the compiler can utilize all possible
  registers

- cmpxchg8b gets disabled when the minimum specified hardware architecture
  doesn't support it (as was already happening for the byte, word, and
  long ones).
Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Parent dacb16b1
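For context, a minimal sketch (not part of the patch; the function and variable names are illustrative) of the compare-and-swap retry loop callers typically build on the cmpxchg() macro adjusted below:

    /* Hypothetical caller: atomically add 'delta' to a shared counter.
     * cmpxchg() returns the value found at *counter; the store took
     * effect iff that value equals our snapshot 'old'. */
    static inline void counter_add(volatile unsigned long *counter,
                                   unsigned long delta)
    {
            unsigned long old;

            do {
                    old = *counter;         /* snapshot current value */
            } while (cmpxchg(counter, old, old + delta) != old);
    }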
@@ -424,6 +424,11 @@ config X86_POPAD_OK
 	depends on !M386
 	default y
 
+config X86_CMPXCHG64
+	bool
+	depends on !M386 && !M486
+	default y
+
 config X86_ALIGNMENT_16
 	bool
 	depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
...
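A hedged sketch of how a call site might consume the new Kconfig symbol; the wrapper name and the fallback branch are illustrative only (a real fallback would need locking), not part of this patch:

    /* Hypothetical wrapper: use cmpxchg8b where CONFIG_X86_CMPXCHG64
     * says the CPU has it; otherwise the caller supplies a fallback. */
    static inline unsigned long long
    try_cmpxchg64(volatile unsigned long long *p,
                  unsigned long long old, unsigned long long new)
    {
    #ifdef CONFIG_X86_CMPXCHG64
            return cmpxchg64(p, old, new);  /* 586+ path */
    #else
            /* 386/486 sketch: NOT atomic; real code would take a lock */
            unsigned long long prev = *p;
            if (prev == old)
                    *p = new;
            return prev;
    #endif
    }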
@@ -167,6 +167,8 @@ struct __xchg_dummy { unsigned long a[100]; };
 #define __xg(x) ((struct __xchg_dummy *)(x))
 
+#ifdef CONFIG_X86_CMPXCHG64
+
 /*
  * The semantics of XCHGCMP8B are a bit strange, this is why
  * there is a loop and the loading of %%eax and %%edx has to
@@ -221,6 +223,8 @@ static inline void __set_64bit_var (unsigned long long *ptr,
 	__set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
 	__set_64bit(ptr, ll_low(value), ll_high(value)) )
 
+#endif
+
 /*
  * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
  * Note 2: xchg has side effect, so that attribute volatile is necessary,
@@ -259,7 +263,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 #ifdef CONFIG_X86_CMPXCHG
 #define __HAVE_ARCH_CMPXCHG 1
-#endif
 
 static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 				      unsigned long new, int size)
@@ -275,13 +278,13 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	case 2:
 		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
 				     : "=a"(prev)
-				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
 				     : "memory");
 		return prev;
 	case 4:
 		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
 				     : "=a"(prev)
-				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
 				     : "memory");
 		return prev;
 	}
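The functional change in this hunk is the constraint: "q" limits GCC to %eax/%ebx/%ecx/%edx (the registers with byte subregisters), which only the 1-byte case actually needs, while "r" lets 16- and 32-bit operands also live in %esi, %edi, or %ebp. A standalone illustration of the widened pattern (assumes GCC inline asm on 32-bit x86; not kernel code):

    /* 'new' may now be allocated to any general register, e.g. %esi. */
    static inline unsigned long cmpxchg32(volatile unsigned long *ptr,
                                          unsigned long old,
                                          unsigned long new)
    {
            unsigned long prev;

            __asm__ __volatile__("lock; cmpxchgl %1,%2"
                                 : "=a"(prev)
                                 : "r"(new), "m"(*ptr), "0"(old)
                                 : "memory");
            return prev;
    }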
@@ -291,6 +294,30 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 #define cmpxchg(ptr,o,n)\
 	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
 					(unsigned long)(n),sizeof(*(ptr))))
+#endif
+
+#ifdef CONFIG_X86_CMPXCHG64
+
+static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
+				      unsigned long long new)
+{
+	unsigned long long prev;
+	__asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
+			     : "=A"(prev)
+			     : "b"((unsigned long)new),
+			       "c"((unsigned long)(new >> 32)),
+			       "m"(*__xg(ptr)),
+			       "0"(old)
+			     : "memory");
+	return prev;
+}
+
+#define cmpxchg64(ptr,o,n)\
+	((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
+					(unsigned long long)(n)))
+
+#endif
+
 #ifdef __KERNEL__
 struct alt_instr {
...
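A minimal usage sketch for the new cmpxchg64() (hypothetical caller, not from this patch): incrementing a 64-bit counter atomically on a 32-bit CPU.

    /* Hypothetical: advance a 64-bit event counter without a lock,
     * using the cmpxchg64() macro added above. */
    static inline void event_count_inc(volatile unsigned long long *count)
    {
            unsigned long long old;

            do {
                    old = *count;
            } while (cmpxchg64(count, old, old + 1) != old);
    }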