Commit 485832a5 authored by Andi Kleen, committed by Linus Torvalds

[PATCH] x86_64: Use int operations in spinlocks to support more than 128 CPUs spinning.

Pointed out by Eric Dumazet
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Parent 8315eca2
@@ -18,22 +18,22 @@
  */
 #define __raw_spin_is_locked(x) \
-		(*(volatile signed char *)(&(x)->slock) <= 0)
+		(*(volatile signed int *)(&(x)->slock) <= 0)
 #define __raw_spin_lock_string \
 	"\n1:\t" \
-	"lock ; decb %0\n\t" \
+	"lock ; decl %0\n\t" \
 	"js 2f\n" \
 	LOCK_SECTION_START("") \
 	"2:\t" \
 	"rep;nop\n\t" \
-	"cmpb $0,%0\n\t" \
+	"cmpl $0,%0\n\t" \
 	"jle 2b\n\t" \
 	"jmp 1b\n" \
 	LOCK_SECTION_END
 #define __raw_spin_unlock_string \
-	"movb $1,%0" \
+	"movl $1,%0" \
 		:"=m" (lock->slock) : : "memory"
 static inline void __raw_spin_lock(raw_spinlock_t *lock)
@@ -47,10 +47,10 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
-	char oldval;
+	int oldval;
 	__asm__ __volatile__(
-		"xchgb %b0,%1"
+		"xchgl %0,%1"
 		:"=q" (oldval), "=m" (lock->slock)
 		:"0" (0) : "memory");
...
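Why a byte is no longer wide enough: every CPU entering __raw_spin_lock first executes the locked decrement and only then falls into the read-only spin loop, so with N contending CPUs the slock counter can transiently sit near 1 - N. Stored in a signed char, that wraps back to a positive value once more than 128 CPUs are spinning and waiters would wrongly see the lock as free; widening every access to 32 bits (decl/cmpl/movl/xchgl) removes that limit. The sketch below is illustrative only, not kernel code: it models the same counter protocol with C11 atomics instead of the inline assembly above, and the names (toy_spinlock, toy_lock, toy_trylock, toy_unlock) are hypothetical.

#include <stdatomic.h>

typedef struct {
	atomic_int slock;	/* 1 = unlocked; 0 or negative = locked/contended */
} toy_spinlock;

/* Models __raw_spin_lock_string: "lock ; decl" then spin on "cmpl $0". */
static void toy_lock(toy_spinlock *l)
{
	for (;;) {
		/* Atomically decrement; a positive old value means we own the lock. */
		if (atomic_fetch_sub(&l->slock, 1) > 0)
			return;
		/* Spin read-only until the holder resets the counter to 1.
		 * Each waiter has already subtracted 1, so with N waiters the
		 * counter hovers around 1 - N; this is where a signed char wraps. */
		while (atomic_load(&l->slock) <= 0)
			;	/* the real code uses rep;nop (cpu_relax) here */
	}
}

/* Models __raw_spin_trylock: exchange the counter with 0, succeed if the
 * old value was positive. */
static int toy_trylock(toy_spinlock *l)
{
	return atomic_exchange(&l->slock, 0) > 0;
}

/* Models __raw_spin_unlock_string: "movl $1,%0". */
static void toy_unlock(toy_spinlock *l)
{
	atomic_store(&l->slock, 1);
}

Memory ordering is left at the default sequential consistency for simplicity; the real implementation relies on the implicit full barrier of lock-prefixed instructions plus the "memory" clobber.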