Commit 26b7fcc4 authored by Joe Perches, committed by Ingo Molnar

include/asm-x86/sync_bitops.h: checkpatch cleanups - formatting only

Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent a4c2d7d9
@@ -13,7 +13,7 @@
  * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
  */

-#define ADDR (*(volatile long *) addr)
+#define ADDR (*(volatile long *)addr)

 /**
  * sync_set_bit - Atomically set a bit in memory
@@ -26,12 +26,12 @@
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void sync_set_bit(int nr, volatile unsigned long * addr)
+static inline void sync_set_bit(int nr, volatile unsigned long *addr)
 {
-        __asm__ __volatile__("lock; btsl %1,%0"
-                             :"+m" (ADDR)
-                             :"Ir" (nr)
-                             : "memory");
+        asm volatile("lock; btsl %1,%0"
+                     : "+m" (ADDR)
+                     : "Ir" (nr)
+                     : "memory");
 }
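
Not part of the patch, but for readers new to these constraints: a minimal standalone sketch of the same lock; btsl pattern (GNU C on x86; demo_set_bit and bitmap are hypothetical names). "+m" tells the compiler the memory word is both read and written, "Ir" allows @nr as an immediate or a register, and the lock prefix makes the read-modify-write atomic against other bus agents.

#include <stdio.h>

static volatile unsigned long bitmap[2];        /* hypothetical 128-bit map */

/* Userspace analogue of sync_set_bit(): same asm as in the hunk above. */
static inline void demo_set_bit(int nr, volatile unsigned long *addr)
{
        asm volatile("lock; btsl %1,%0"
                     : "+m" (*addr)
                     : "Ir" (nr)
                     : "memory");
}

int main(void)
{
        demo_set_bit(5, &bitmap[0]);
        printf("bitmap[0] = %#lx\n", bitmap[0]);        /* prints 0x20 */
        return 0;
}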

 /**
@@ -44,12 +44,12 @@ static inline void sync_set_bit(int nr, volatile unsigned long * addr)
  * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
  * in order to ensure changes are visible on other processors.
  */
-static inline void sync_clear_bit(int nr, volatile unsigned long * addr)
+static inline void sync_clear_bit(int nr, volatile unsigned long *addr)
 {
-        __asm__ __volatile__("lock; btrl %1,%0"
-                             :"+m" (ADDR)
-                             :"Ir" (nr)
-                             : "memory");
+        asm volatile("lock; btrl %1,%0"
+                     : "+m" (ADDR)
+                     : "Ir" (nr)
+                     : "memory");
 }

 /**
@@ -61,12 +61,12 @@ static inline void sync_clear_bit(int nr, volatile unsigned long * addr)
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void sync_change_bit(int nr, volatile unsigned long * addr)
+static inline void sync_change_bit(int nr, volatile unsigned long *addr)
 {
-        __asm__ __volatile__("lock; btcl %1,%0"
-                             :"+m" (ADDR)
-                             :"Ir" (nr)
-                             : "memory");
+        asm volatile("lock; btcl %1,%0"
+                     : "+m" (ADDR)
+                     : "Ir" (nr)
+                     : "memory");
 }

 /**
@@ -77,13 +77,13 @@ static inline void sync_change_bit(int nr, volatile unsigned long * addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr)
+static inline int sync_test_and_set_bit(int nr, volatile unsigned long *addr)
 {
         int oldbit;

-        __asm__ __volatile__("lock; btsl %2,%1\n\tsbbl %0,%0"
-                             :"=r" (oldbit),"+m" (ADDR)
-                             :"Ir" (nr) : "memory");
+        asm volatile("lock; btsl %2,%1\n\tsbbl %0,%0"
+                     : "=r" (oldbit), "+m" (ADDR)
+                     : "Ir" (nr) : "memory");
         return oldbit;
 }
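
A note on the return value (also not in the patch): btsl copies the previous value of the bit into the carry flag, and sbbl %0,%0 then computes oldbit - oldbit - CF, i.e. 0 when the bit was clear and -1 when it was already set, so callers must test for nonzero rather than for 1. A standalone sketch under the same assumptions as the earlier one (demo_test_and_set_bit is a hypothetical name):

#include <assert.h>

static volatile unsigned long word;

/* Userspace analogue of sync_test_and_set_bit(): returns 0 if bit nr
 * was clear, -1 (nonzero) if it was already set. */
static inline int demo_test_and_set_bit(int nr, volatile unsigned long *addr)
{
        int oldbit;

        asm volatile("lock; btsl %2,%1\n\tsbbl %0,%0"
                     : "=r" (oldbit), "+m" (*addr)
                     : "Ir" (nr) : "memory");
        return oldbit;
}

int main(void)
{
        assert(demo_test_and_set_bit(3, &word) == 0);   /* bit was clear */
        assert(demo_test_and_set_bit(3, &word) != 0);   /* bit was set */
        return 0;
}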
@@ -95,13 +95,13 @@ static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int sync_test_and_clear_bit(int nr, volatile unsigned long * addr)
+static inline int sync_test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
         int oldbit;

-        __asm__ __volatile__("lock; btrl %2,%1\n\tsbbl %0,%0"
-                             :"=r" (oldbit),"+m" (ADDR)
-                             :"Ir" (nr) : "memory");
+        asm volatile("lock; btrl %2,%1\n\tsbbl %0,%0"
+                     : "=r" (oldbit), "+m" (ADDR)
+                     : "Ir" (nr) : "memory");
         return oldbit;
 }
@@ -113,13 +113,13 @@ static inline int sync_test_and_clear_bit(int nr, volatile unsigned long * addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int sync_test_and_change_bit(int nr, volatile unsigned long* addr)
+static inline int sync_test_and_change_bit(int nr, volatile unsigned long *addr)
 {
         int oldbit;

-        __asm__ __volatile__("lock; btcl %2,%1\n\tsbbl %0,%0"
-                             :"=r" (oldbit),"+m" (ADDR)
-                             :"Ir" (nr) : "memory");
+        asm volatile("lock; btcl %2,%1\n\tsbbl %0,%0"
+                     : "=r" (oldbit), "+m" (ADDR)
+                     : "Ir" (nr) : "memory");
         return oldbit;
 }
...
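
For context (also outside this formatting-only patch): the sync_* variants always use the lock prefix because they synchronize with agents outside the kernel's CONFIG_SMP knowledge (historically Xen). A hypothetical caller, assuming this header is reachable as <asm/sync_bitops.h>; SLOT_BUSY, slot_flags, and both helpers are illustrative only:

#include <asm/sync_bitops.h>

#define SLOT_BUSY 0     /* hypothetical bit shared with another agent */

static unsigned long slot_flags;

/* Returns 1 if we atomically claimed the slot (bit was clear). */
static int try_acquire_slot(void)
{
        return !sync_test_and_set_bit(SLOT_BUSY, &slot_flags);
}

static void release_slot(void)
{
        sync_clear_bit(SLOT_BUSY, &slot_flags);
}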