提交 e2a3d402 编写于 作者: Linus Torvalds

power: improve inline asm memory constraints

Use "+m" rather than a combination of "=m" and "m" for improved
clarity and consistency.
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
上级 a496e25d
...@@ -27,8 +27,8 @@ static __inline__ void atomic_add(int a, atomic_t *v) ...@@ -27,8 +27,8 @@ static __inline__ void atomic_add(int a, atomic_t *v)
PPC405_ERR77(0,%3) PPC405_ERR77(0,%3)
" stwcx. %0,0,%3 \n\ " stwcx. %0,0,%3 \n\
bne- 1b" bne- 1b"
: "=&r" (t), "=m" (v->counter) : "=&r" (t), "+m" (v->counter)
: "r" (a), "r" (&v->counter), "m" (v->counter) : "r" (a), "r" (&v->counter)
: "cc"); : "cc");
} }
...@@ -63,8 +63,8 @@ static __inline__ void atomic_sub(int a, atomic_t *v) ...@@ -63,8 +63,8 @@ static __inline__ void atomic_sub(int a, atomic_t *v)
PPC405_ERR77(0,%3) PPC405_ERR77(0,%3)
" stwcx. %0,0,%3 \n\ " stwcx. %0,0,%3 \n\
bne- 1b" bne- 1b"
: "=&r" (t), "=m" (v->counter) : "=&r" (t), "+m" (v->counter)
: "r" (a), "r" (&v->counter), "m" (v->counter) : "r" (a), "r" (&v->counter)
: "cc"); : "cc");
} }
...@@ -97,8 +97,8 @@ static __inline__ void atomic_inc(atomic_t *v) ...@@ -97,8 +97,8 @@ static __inline__ void atomic_inc(atomic_t *v)
PPC405_ERR77(0,%2) PPC405_ERR77(0,%2)
" stwcx. %0,0,%2 \n\ " stwcx. %0,0,%2 \n\
bne- 1b" bne- 1b"
: "=&r" (t), "=m" (v->counter) : "=&r" (t), "+m" (v->counter)
: "r" (&v->counter), "m" (v->counter) : "r" (&v->counter)
: "cc"); : "cc");
} }
...@@ -141,8 +141,8 @@ static __inline__ void atomic_dec(atomic_t *v) ...@@ -141,8 +141,8 @@ static __inline__ void atomic_dec(atomic_t *v)
PPC405_ERR77(0,%2)\ PPC405_ERR77(0,%2)\
" stwcx. %0,0,%2\n\ " stwcx. %0,0,%2\n\
bne- 1b" bne- 1b"
: "=&r" (t), "=m" (v->counter) : "=&r" (t), "+m" (v->counter)
: "r" (&v->counter), "m" (v->counter) : "r" (&v->counter)
: "cc"); : "cc");
} }
...@@ -253,8 +253,8 @@ static __inline__ void atomic64_add(long a, atomic64_t *v) ...@@ -253,8 +253,8 @@ static __inline__ void atomic64_add(long a, atomic64_t *v)
add %0,%2,%0\n\ add %0,%2,%0\n\
stdcx. %0,0,%3 \n\ stdcx. %0,0,%3 \n\
bne- 1b" bne- 1b"
: "=&r" (t), "=m" (v->counter) : "=&r" (t), "+m" (v->counter)
: "r" (a), "r" (&v->counter), "m" (v->counter) : "r" (a), "r" (&v->counter)
: "cc"); : "cc");
} }
...@@ -287,8 +287,8 @@ static __inline__ void atomic64_sub(long a, atomic64_t *v) ...@@ -287,8 +287,8 @@ static __inline__ void atomic64_sub(long a, atomic64_t *v)
subf %0,%2,%0\n\ subf %0,%2,%0\n\
stdcx. %0,0,%3 \n\ stdcx. %0,0,%3 \n\
bne- 1b" bne- 1b"
: "=&r" (t), "=m" (v->counter) : "=&r" (t), "+m" (v->counter)
: "r" (a), "r" (&v->counter), "m" (v->counter) : "r" (a), "r" (&v->counter)
: "cc"); : "cc");
} }
...@@ -319,8 +319,8 @@ static __inline__ void atomic64_inc(atomic64_t *v) ...@@ -319,8 +319,8 @@ static __inline__ void atomic64_inc(atomic64_t *v)
addic %0,%0,1\n\ addic %0,%0,1\n\
stdcx. %0,0,%2 \n\ stdcx. %0,0,%2 \n\
bne- 1b" bne- 1b"
: "=&r" (t), "=m" (v->counter) : "=&r" (t), "+m" (v->counter)
: "r" (&v->counter), "m" (v->counter) : "r" (&v->counter)
: "cc"); : "cc");
} }
...@@ -361,8 +361,8 @@ static __inline__ void atomic64_dec(atomic64_t *v) ...@@ -361,8 +361,8 @@ static __inline__ void atomic64_dec(atomic64_t *v)
addic %0,%0,-1\n\ addic %0,%0,-1\n\
stdcx. %0,0,%2\n\ stdcx. %0,0,%2\n\
bne- 1b" bne- 1b"
: "=&r" (t), "=m" (v->counter) : "=&r" (t), "+m" (v->counter)
: "r" (&v->counter), "m" (v->counter) : "r" (&v->counter)
: "cc"); : "cc");
} }
......
...@@ -65,8 +65,8 @@ static __inline__ void set_bit(int nr, volatile unsigned long *addr) ...@@ -65,8 +65,8 @@ static __inline__ void set_bit(int nr, volatile unsigned long *addr)
PPC405_ERR77(0,%3) PPC405_ERR77(0,%3)
PPC_STLCX "%0,0,%3\n" PPC_STLCX "%0,0,%3\n"
"bne- 1b" "bne- 1b"
: "=&r"(old), "=m"(*p) : "=&r" (old), "+m" (*p)
: "r"(mask), "r"(p), "m"(*p) : "r" (mask), "r" (p)
: "cc" ); : "cc" );
} }
...@@ -82,8 +82,8 @@ static __inline__ void clear_bit(int nr, volatile unsigned long *addr) ...@@ -82,8 +82,8 @@ static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
PPC405_ERR77(0,%3) PPC405_ERR77(0,%3)
PPC_STLCX "%0,0,%3\n" PPC_STLCX "%0,0,%3\n"
"bne- 1b" "bne- 1b"
: "=&r"(old), "=m"(*p) : "=&r" (old), "+m" (*p)
: "r"(mask), "r"(p), "m"(*p) : "r" (mask), "r" (p)
: "cc" ); : "cc" );
} }
...@@ -99,8 +99,8 @@ static __inline__ void change_bit(int nr, volatile unsigned long *addr) ...@@ -99,8 +99,8 @@ static __inline__ void change_bit(int nr, volatile unsigned long *addr)
PPC405_ERR77(0,%3) PPC405_ERR77(0,%3)
PPC_STLCX "%0,0,%3\n" PPC_STLCX "%0,0,%3\n"
"bne- 1b" "bne- 1b"
: "=&r"(old), "=m"(*p) : "=&r" (old), "+m" (*p)
: "r"(mask), "r"(p), "m"(*p) : "r" (mask), "r" (p)
: "cc" ); : "cc" );
} }
...@@ -179,8 +179,8 @@ static __inline__ void set_bits(unsigned long mask, unsigned long *addr) ...@@ -179,8 +179,8 @@ static __inline__ void set_bits(unsigned long mask, unsigned long *addr)
"or %0,%0,%2\n" "or %0,%0,%2\n"
PPC_STLCX "%0,0,%3\n" PPC_STLCX "%0,0,%3\n"
"bne- 1b" "bne- 1b"
: "=&r" (old), "=m" (*addr) : "=&r" (old), "+m" (*addr)
: "r" (mask), "r" (addr), "m" (*addr) : "r" (mask), "r" (addr)
: "cc"); : "cc");
} }
......
...@@ -220,8 +220,8 @@ __xchg_u32(volatile void *p, unsigned long val) ...@@ -220,8 +220,8 @@ __xchg_u32(volatile void *p, unsigned long val)
" stwcx. %3,0,%2 \n\ " stwcx. %3,0,%2 \n\
bne- 1b" bne- 1b"
ISYNC_ON_SMP ISYNC_ON_SMP
: "=&r" (prev), "=m" (*(volatile unsigned int *)p) : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
: "r" (p), "r" (val), "m" (*(volatile unsigned int *)p) : "r" (p), "r" (val)
: "cc", "memory"); : "cc", "memory");
return prev; return prev;
...@@ -240,8 +240,8 @@ __xchg_u64(volatile void *p, unsigned long val) ...@@ -240,8 +240,8 @@ __xchg_u64(volatile void *p, unsigned long val)
" stdcx. %3,0,%2 \n\ " stdcx. %3,0,%2 \n\
bne- 1b" bne- 1b"
ISYNC_ON_SMP ISYNC_ON_SMP
: "=&r" (prev), "=m" (*(volatile unsigned long *)p) : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
: "r" (p), "r" (val), "m" (*(volatile unsigned long *)p) : "r" (p), "r" (val)
: "cc", "memory"); : "cc", "memory");
return prev; return prev;
...@@ -299,8 +299,8 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new) ...@@ -299,8 +299,8 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
ISYNC_ON_SMP ISYNC_ON_SMP
"\n\ "\n\
2:" 2:"
: "=&r" (prev), "=m" (*p) : "=&r" (prev), "+m" (*p)
: "r" (p), "r" (old), "r" (new), "m" (*p) : "r" (p), "r" (old), "r" (new)
: "cc", "memory"); : "cc", "memory");
return prev; return prev;
...@@ -322,8 +322,8 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new) ...@@ -322,8 +322,8 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
ISYNC_ON_SMP ISYNC_ON_SMP
"\n\ "\n\
2:" 2:"
: "=&r" (prev), "=m" (*p) : "=&r" (prev), "+m" (*p)
: "r" (p), "r" (old), "r" (new), "m" (*p) : "r" (p), "r" (old), "r" (new)
: "cc", "memory"); : "cc", "memory");
return prev; return prev;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册