提交 d3bf60a6 编写于 作者: J Joe Perches 提交者: Ingo Molnar

include/asm-x86/spinlock.h: checkpatch cleanups - formatting only

Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
上级 ceb7ce10
......@@ -82,7 +82,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
/*
 * __raw_spin_lock, byte-ticket variant (the signature appears in the diff
 * hunk header just above).  NOTE(review): this is scraped diff text — the
 * patch's deleted and added lines are interleaved with the +/- markers
 * stripped, and the "......@@" line below is a second hunk header; the asm
 * body lines that fell between the two hunks (the spin/retry sequence)
 * are not visible here.
 */
{
short inc = 0x0100;
/* deleted by the patch: */
__asm__ __volatile__ (
/* added by the patch (checkpatch-preferred spelling): */
asm volatile (
LOCK_PREFIX "xaddw %w0, %1\n"
"1:\t"
/* compares the two ticket halves — presumably owner vs. next; TODO confirm */
"cmpb %h0, %b0\n\t"
......@@ -92,9 +92,9 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
/* don't need lfence here, because loads are in-order */
"jmp 1b\n"
"2:"
/* deleted by the patch: */
:"+Q" (inc), "+m" (lock->slock)
/* added by the patch (space after ':'): */
: "+Q" (inc), "+m" (lock->slock)
:
/* deleted by the patch: */
:"memory", "cc");
/* added by the patch: */
: "memory", "cc");
}
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
......@@ -104,8 +104,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
/*
 * __raw_spin_trylock, byte-ticket variant (signature in the hunk header
 * above).  NOTE(review): scraped diff text — deleted and added lines are
 * interleaved without +/- markers; the function's opening brace, part of
 * the asm body (between the two hunks), and the trailing return statement
 * fall outside the visible hunks.
 */
int tmp;
short new;
/* deleted by the patch (string on its own line): */
asm volatile(
"movw %2,%w0\n\t"
/* added by the patch (first string pulled onto the asm volatile( line): */
asm volatile("movw %2,%w0\n\t"
"cmpb %h0,%b0\n\t"
"jne 1f\n\t"
"movw %w0,%w1\n\t"
......@@ -114,7 +113,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
"1:"
"sete %b1\n\t"
"movzbl %b1,%0\n\t"
/* deleted by the patch: */
:"=&a" (tmp), "=Q" (new), "+m" (lock->slock)
/* added by the patch (space after ':'): */
: "=&a" (tmp), "=Q" (new), "+m" (lock->slock)
:
: "memory", "cc");
......@@ -123,11 +122,10 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
/*
 * Release the byte-ticket spinlock.
 *
 * Bumps the owner byte of lock->slock with "incb" so the next waiting
 * ticket holder may proceed.  The "memory" clobber keeps the compiler
 * from reordering memory accesses across the release; UNLOCK_LOCK_PREFIX
 * is a project macro supplying any needed lock prefix (defined elsewhere
 * in this header).
 *
 * NOTE(review): the scraped diff had interleaved the patch's deleted
 * (__asm__ __volatile__ form) and added (asm volatile form) lines into
 * duplicated, invalid statements; this is the reconstructed post-patch
 * function — every line below appears verbatim among the hunk's added
 * or context lines.
 */
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
#else
static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
......@@ -149,8 +147,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
/*
 * __raw_spin_lock, word-ticket variant for the larger-NR_CPUS case
 * (signature in the hunk header above; opening brace not visible).
 * NOTE(review): scraped diff text — deleted and added lines are
 * interleaved without +/- markers, and the "......@@" line below is a
 * second hunk header; the asm compare/spin lines between the two hunks
 * are not visible here.
 */
int inc = 0x00010000;
int tmp;
/* deleted by the patch: */
__asm__ __volatile__ (
"lock ; xaddl %0, %1\n"
/* added by the patch (first string pulled onto the asm volatile( line): */
asm volatile("lock ; xaddl %0, %1\n"
"movzwl %w0, %2\n\t"
"shrl $16, %0\n\t"
"1:\t"
......@@ -161,9 +158,9 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
/* don't need lfence here, because loads are in-order */
"jmp 1b\n"
"2:"
/* deleted by the patch: */
:"+Q" (inc), "+m" (lock->slock), "=r" (tmp)
/* added by the patch (space after ':'): */
: "+Q" (inc), "+m" (lock->slock), "=r" (tmp)
:
/* deleted by the patch: */
:"memory", "cc");
/* added by the patch: */
: "memory", "cc");
}
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
......@@ -173,8 +170,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
/*
 * __raw_spin_trylock, word-ticket variant (signature in the hunk header
 * above).  NOTE(review): scraped diff text — deleted and added lines are
 * interleaved without +/- markers; the opening brace, the cmpxchg portion
 * of the asm body (between the two hunks), and the trailing return fall
 * outside the visible hunks.
 */
int tmp;
int new;
/* deleted by the patch (string on its own line): */
asm volatile(
"movl %2,%0\n\t"
/* added by the patch (first string pulled onto the asm volatile( line): */
asm volatile("movl %2,%0\n\t"
"movl %0,%1\n\t"
"roll $16, %0\n\t"
"cmpl %0,%1\n\t"
......@@ -184,7 +180,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
"1:"
"sete %b1\n\t"
"movzbl %b1,%0\n\t"
/* deleted by the patch: */
:"=&a" (tmp), "=r" (new), "+m" (lock->slock)
/* added by the patch (space after ':'): */
: "=&a" (tmp), "=r" (new), "+m" (lock->slock)
:
: "memory", "cc");
......@@ -193,11 +189,10 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
/*
 * Release the word-ticket spinlock (larger-NR_CPUS variant).
 *
 * Bumps the 16-bit owner field of lock->slock with "incw" so the next
 * waiting ticket holder may proceed.  The "memory" clobber keeps the
 * compiler from reordering memory accesses across the release;
 * UNLOCK_LOCK_PREFIX is a project macro supplying any needed lock prefix
 * (defined elsewhere in this header).
 *
 * NOTE(review): the scraped diff had interleaved the patch's deleted
 * (__asm__ __volatile__ form) and added (asm volatile form) lines into
 * duplicated, invalid statements; this is the reconstructed post-patch
 * function — every line below appears verbatim among the hunk's added
 * or context lines.
 */
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
#endif
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册