Commit ef1f3413 authored by Jan Beulich, committed by Ingo Molnar

x86: ticket spin locks: fix asm constraints

In addition to these changes, I doubt the 'volatile' on all the ticket
lock asm()-s is really necessary.
Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent 0a328ea4
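
The common thread in all three hunks below is the '&' (early-clobber) modifier: each asm() writes an output operand before it has finished consuming its inputs, so GCC must not place that output in the same register as an input. As a hedged illustration of the same hazard (plain user-space C, not kernel code; the function name triple and the constant are invented, and it assumes GCC-style extended asm on x86):

#include <stdio.h>

/* Illustrative only: the first instruction writes the output while the
 * second still needs the input, so the output must be early-clobbered. */
static int triple(int src)
{
	int tmp;

	asm("movl $3, %0\n\t"	/* writes %0 before ...            */
	    "imull %1, %0"	/* ... %1 has been consumed        */
	    : "=&r" (tmp)	/* '&': never overlap with an input */
	    : "r" (src)
	    : "cc");
	return tmp;
}

int main(void)
{
	printf("%d\n", triple(14));	/* prints 42 */
	return 0;
}

With a plain "=r" constraint the compiler may allocate tmp and src to the same register, and the movl would destroy src before imull reads it; "=&r" forbids that overlap, which is what the hunks below add to the kernel's constraints.
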
@@ -101,7 +101,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 		     "1:"
 		     "sete %b1\n\t"
 		     "movzbl %b1,%0\n\t"
-		     : "=&a" (tmp), "=Q" (new), "+m" (lock->slock)
+		     : "=&a" (tmp), "=&Q" (new), "+m" (lock->slock)
 		     :
 		     : "memory", "cc");
@@ -146,7 +146,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 		     /* don't need lfence here, because loads are in-order */
 		     "jmp 1b\n"
 		     "2:"
-		     : "+Q" (inc), "+m" (lock->slock), "=r" (tmp)
+		     : "+r" (inc), "+m" (lock->slock), "=&r" (tmp)
 		     :
 		     : "memory", "cc");
 }
@@ -166,7 +166,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 		     "1:"
 		     "sete %b1\n\t"
 		     "movzbl %b1,%0\n\t"
-		     : "=&a" (tmp), "=r" (new), "+m" (lock->slock)
+		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
 		     :
 		     : "memory", "cc");
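
Two of the changes above also touch the register class, not just the clobber. The second hunk relaxes inc from "+Q" to "+r", presumably because that operand does not need the a/b/c/d subset that "Q" requests; the third hunk tightens new from "=r" to "=&q", since the sete %b1 / movzbl %b1,%0 sequence stores into the operand's low byte, and "q" asks for a register whose low byte is addressable (a/b/c/d on 32-bit x86, any GPR on 64-bit), whereas plain "r" gives no such guarantee. A small stand-alone sketch of that sete/movzbl pattern (again not the kernel code; is_zero is an invented name, and the early clobber is kept simply to mirror the kernel's constraints):

#include <stdio.h>

/* Illustrative only: "%b1" names the low byte of operand 1, so that operand
 * must live in a byte-addressable register; "q" guarantees this on 32-bit
 * x86, "r" does not. */
static int is_zero(unsigned int v)
{
	int ret;
	unsigned char flag;

	asm("testl %2, %2\n\t"	/* sets ZF if v == 0            */
	    "sete %b1\n\t"	/* flag's low byte = ZF         */
	    "movzbl %b1, %0"	/* zero-extend the byte into ret */
	    : "=r" (ret), "=&q" (flag)
	    : "r" (v)
	    : "cc");
	return ret;
}

int main(void)
{
	printf("%d %d\n", is_zero(0), is_zero(7));	/* prints "1 0" */
	return 0;
}
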