Commit 940cac4e authored by Peter Zijlstra, committed by Yang Yingliang

x86/asm: 'Simplify' GEN_*_RMWcc() macros

mainline inclusion
from mainline-v4.20-rc1
commit 288e4521
category: bugfix
bugzilla: NA
CVE: NA
---------------------------

Currently the GEN_*_RMWcc() macros include a return statement, which
pretty much mandates we directly wrap them in an (inline) function.

Macros with return statements are tricky and, as per the above, limit
use, so remove the return statement and make them
statement-expressions. This allows them to be used more widely.
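
For readers unfamiliar with the construct, a minimal sketch of the shape of
the change (illustrative names only, not the kernel macros themselves):

  #include <stdbool.h>

  /* Old: the macro returns, so it can only live inside a wrapper function. */
  #define OLD_DEC_AND_TEST(v)	do { return --(v) == 0; } while (0)

  /* New: a GNU C statement-expression evaluates to its last expression, so
   * it can be used anywhere an expression can, e.g. "if (NEW_DEC_AND_TEST(x))". */
  #define NEW_DEC_AND_TEST(v)	({ bool __c = (--(v) == 0); __c; })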

Also, shuffle the arguments a bit. Place the @cc argument as 3rd; this
makes it consistent between UNARY and BINARY, but more importantly, it
makes the @arg0 argument last.

Since the @arg0 argument is now last, we can do CPP trickery and make
it an optional argument, simplifying the users; 17 out of 18
occurrences do not need this argument.
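
The trick is the usual CPP argument-counting idiom. A simplified, runnable
sketch of the dispatch under assumed names (the real macros, RMWcc_ARGS and
RMWcc_CONCAT, count up to 12 arguments and appear in the last hunk of the
diff, guarded by _ASM_X86_RMWcc; GEN/GEN_2/GEN_3 below are illustrative):

  #include <stdio.h>

  /* Count up to 4 arguments; the real macros count to 12. */
  #define __COUNT_ARGS(_0, _1, _2, _3, _4, _n, X...) _n
  #define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 4, 3, 2, 1, 0)

  #define __CONCAT(a, b) a ## b
  #define CONCAT(a, b) __CONCAT(a, b)

  /* GEN(op, var) picks GEN_2(); GEN(op, var, arg0) picks GEN_3(). */
  #define GEN(X...)		CONCAT(GEN_, COUNT_ARGS(X))(X)
  #define GEN_3(op, var, arg0)	printf("%s %s <- %d\n", (op), (arg0), (var))
  #define GEN_2(op, var)	GEN_3(op, var, "%[var]")

  int main(void)
  {
  	GEN("incl", 1);			/* arg0 defaulted to "%[var]" */
  	GEN("addl", 2, "%[cnt]");	/* explicit arg0 */
  	return 0;
  }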

Finally, change to asm symbolic names, instead of the numeric ordering
of operands, which allows us to get rid of __BINARY_RMWcc_ARG and get
cleaner code overall.
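
By way of illustration only (not code from this patch), the difference
between numeric and symbolic operand references in GCC inline asm:

  static inline void inc_numeric(int *p)
  {
  	asm volatile("incl %0" : "+m" (*p));		/* operand by position */
  }

  static inline void inc_symbolic(int *p)
  {
  	asm volatile("incl %[var]" : [var] "+m" (*p));	/* operand by name */
  }
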
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: JBeulich@suse.com
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: bp@alien8.de
Cc: hpa@linux.intel.com
Link: https://lkml.kernel.org/r/20181003130957.108960094@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Conflicts:
	arch/x86/include/asm/refcount.h
[hanjun: update arch/x86/include/asm/qspinlock.h for the changed
GEN_BINARY_RMWcc() interface, and update arch/x86/include/asm/refcount.h
to change 'counter' to 'var']
Signed-off-by: Hanjun Guo <guohanjun@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent e6c110fd
@@ -82,7 +82,7 @@ static __always_inline void arch_atomic_sub(int i, atomic_t *v)
  */
 static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
+	return GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, e, "er", i);
 }
 #define arch_atomic_sub_and_test arch_atomic_sub_and_test
@@ -122,7 +122,7 @@ static __always_inline void arch_atomic_dec(atomic_t *v)
  */
 static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
 {
-	GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);
+	return GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, e);
 }
 #define arch_atomic_dec_and_test arch_atomic_dec_and_test
@@ -136,7 +136,7 @@ static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
  */
 static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
 {
-	GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);
+	return GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, e);
 }
 #define arch_atomic_inc_and_test arch_atomic_inc_and_test
@@ -151,7 +151,7 @@ static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
  */
 static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
+	return GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, s, "er", i);
 }
 #define arch_atomic_add_negative arch_atomic_add_negative
......
@@ -73,7 +73,7 @@ static inline void arch_atomic64_sub(long i, atomic64_t *v)
  */
 static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
+	return GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, e, "er", i);
 }
 #define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
@@ -115,7 +115,7 @@ static __always_inline void arch_atomic64_dec(atomic64_t *v)
  */
 static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
 {
-	GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e);
+	return GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, e);
 }
 #define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
@@ -129,7 +129,7 @@ static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
  */
 static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
 {
-	GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e);
+	return GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, e);
 }
 #define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
@@ -144,7 +144,7 @@ static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
  */
 static inline bool arch_atomic64_add_negative(long i, atomic64_t *v)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);
+	return GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, s, "er", i);
 }
 #define arch_atomic64_add_negative arch_atomic64_add_negative
......
@@ -206,8 +206,7 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr)
  */
 static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts),
-			 *addr, "Ir", nr, "%0", c);
+	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), *addr, c, "Ir", nr);
 }
 /**
@@ -253,8 +252,7 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *
  */
 static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr),
-			 *addr, "Ir", nr, "%0", c);
+	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr), *addr, c, "Ir", nr);
 }
 /**
@@ -307,8 +305,7 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon
  */
 static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc),
-			 *addr, "Ir", nr, "%0", c);
+	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, c, "Ir", nr);
 }
 static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
......
@@ -53,7 +53,7 @@ static inline void local_sub(long i, local_t *l)
  */
 static inline bool local_sub_and_test(long i, local_t *l)
 {
-	GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", e);
+	return GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, e, "er", i);
 }
 /**
@@ -66,7 +66,7 @@ static inline bool local_sub_and_test(long i, local_t *l)
  */
 static inline bool local_dec_and_test(local_t *l)
 {
-	GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", e);
+	return GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, e);
 }
 /**
@@ -79,7 +79,7 @@ static inline bool local_dec_and_test(local_t *l)
  */
 static inline bool local_inc_and_test(local_t *l)
 {
-	GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", e);
+	return GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, e);
 }
 /**
@@ -93,7 +93,7 @@ static inline bool local_inc_and_test(local_t *l)
  */
 static inline bool local_add_negative(long i, local_t *l)
 {
-	GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", s);
+	return GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, s, "er", i);
 }
 /**
......
@@ -88,7 +88,7 @@ static __always_inline void __preempt_count_sub(int val)
  */
 static __always_inline bool __preempt_count_dec_and_test(void)
 {
-	GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), e);
+	return GEN_UNARY_RMWcc("decl", __preempt_count, e, __percpu_arg([var]));
 }
 /*
......
@@ -14,8 +14,8 @@
 static __always_inline bool __queued_RMW_btsl(struct qspinlock *lock)
 {
-	GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter,
-			 "I", _Q_PENDING_OFFSET, "%0", c);
+	return GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter,
+				c, "I", _Q_PENDING_OFFSET);
 }
 static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
......
@@ -17,7 +17,7 @@
  */
 #define _REFCOUNT_EXCEPTION \
 	".pushsection .text..refcount\n" \
-	"111:\tlea %[counter], %%" _ASM_CX "\n" \
+	"111:\tlea %[var], %%" _ASM_CX "\n" \
 	"112:\t" ASM_UD2 "\n" \
 	ASM_UNREACHABLE \
 	".popsection\n" \
@@ -43,7 +43,7 @@ static __always_inline void refcount_add(unsigned int i, refcount_t *r)
 {
 	asm volatile(LOCK_PREFIX "addl %1,%0\n\t"
 		REFCOUNT_CHECK_LT_ZERO
-		: [counter] "+m" (r->refs.counter)
+		: [var] "+m" (r->refs.counter)
 		: "ir" (i)
 		: "cc", "cx");
 }
@@ -52,7 +52,7 @@ static __always_inline void refcount_inc(refcount_t *r)
 {
 	asm volatile(LOCK_PREFIX "incl %0\n\t"
 		REFCOUNT_CHECK_LT_ZERO
-		: [counter] "+m" (r->refs.counter)
+		: [var] "+m" (r->refs.counter)
 		: : "cc", "cx");
 }
@@ -60,21 +60,23 @@ static __always_inline void refcount_dec(refcount_t *r)
 {
 	asm volatile(LOCK_PREFIX "decl %0\n\t"
 		REFCOUNT_CHECK_LE_ZERO
-		: [counter] "+m" (r->refs.counter)
+		: [var] "+m" (r->refs.counter)
 		: : "cc", "cx");
 }
 static __always_inline __must_check
 bool refcount_sub_and_test(unsigned int i, refcount_t *r)
 {
-	GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl", REFCOUNT_CHECK_LT_ZERO,
-				  r->refs.counter, "er", i, "%0", e, "cx");
+	return GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl",
+					 REFCOUNT_CHECK_LT_ZERO,
+					 r->refs.counter, e, "er", i, "cx");
 }
 static __always_inline __must_check bool refcount_dec_and_test(refcount_t *r)
 {
-	GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl", REFCOUNT_CHECK_LT_ZERO,
-				 r->refs.counter, "%0", e, "cx");
+	return GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl",
+					REFCOUNT_CHECK_LT_ZERO,
+					r->refs.counter, e, "cx");
 }
 static __always_inline __must_check
@@ -92,7 +94,7 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
 		/* Did we try to increment from/to an undesirable state? */
 		if (unlikely(c < 0 || c == INT_MAX || result < c)) {
 			asm volatile(REFCOUNT_ERROR
-				     : : [counter] "m" (r->refs.counter)
+				     : : [var] "m" (r->refs.counter)
 				     : "cc", "cx");
 			break;
 		}
......
@@ -2,56 +2,69 @@
 #ifndef _ASM_X86_RMWcc
 #define _ASM_X86_RMWcc
 
+/* This counts to 12. Any more, it will return 13th argument. */
+#define __RMWcc_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _n, X...) _n
+#define RMWcc_ARGS(X...) __RMWcc_ARGS(, ##X, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+
+#define __RMWcc_CONCAT(a, b) a ## b
+#define RMWcc_CONCAT(a, b) __RMWcc_CONCAT(a, b)
+
 #define __CLOBBERS_MEM(clb...) "memory", ## clb
 
 #if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CONFIG_CC_HAS_ASM_GOTO)
 
 /* Use asm goto */
 
-#define __GEN_RMWcc(fullop, var, cc, clobbers, ...) \
-do { \
+#define __GEN_RMWcc(fullop, _var, cc, clobbers, ...) \
+({ \
+	bool c = false; \
 	asm_volatile_goto (fullop "; j" #cc " %l[cc_label]" \
-		: : [counter] "m" (var), ## __VA_ARGS__ \
+		: : [var] "m" (_var), ## __VA_ARGS__ \
 		: clobbers : cc_label); \
-	return 0; \
-cc_label: \
-	return 1; \
-} while (0)
-
-#define __BINARY_RMWcc_ARG " %1, "
+	if (0) { \
+cc_label:	c = true; \
+	} \
+	c; \
+})
 
 #else /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CONFIG_CC_HAS_ASM_GOTO) */
 
 /* Use flags output or a set instruction */
 
-#define __GEN_RMWcc(fullop, var, cc, clobbers, ...) \
-do { \
+#define __GEN_RMWcc(fullop, _var, cc, clobbers, ...) \
+({ \
 	bool c; \
 	asm volatile (fullop CC_SET(cc) \
-		: [counter] "+m" (var), CC_OUT(cc) (c) \
+		: [var] "+m" (_var), CC_OUT(cc) (c) \
 		: __VA_ARGS__ : clobbers); \
-	return c; \
-} while (0)
-
-#define __BINARY_RMWcc_ARG " %2, "
+	c; \
+})
 
 #endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CONFIG_CC_HAS_ASM_GOTO) */
 
-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
+#define GEN_UNARY_RMWcc_4(op, var, cc, arg0) \
 	__GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM())
 
-#define GEN_UNARY_SUFFIXED_RMWcc(op, suffix, var, arg0, cc, clobbers...)\
-	__GEN_RMWcc(op " " arg0 "\n\t" suffix, var, cc, \
-		    __CLOBBERS_MEM(clobbers))
+#define GEN_UNARY_RMWcc_3(op, var, cc) \
+	GEN_UNARY_RMWcc_4(op, var, cc, "%[var]")
 
-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
-	__GEN_RMWcc(op __BINARY_RMWcc_ARG arg0, var, cc, \
-		    __CLOBBERS_MEM(), vcon (val))
+#define GEN_UNARY_RMWcc(X...) RMWcc_CONCAT(GEN_UNARY_RMWcc_, RMWcc_ARGS(X))(X)
+
+#define GEN_BINARY_RMWcc_6(op, var, cc, vcon, _val, arg0) \
+	__GEN_RMWcc(op " %[val], " arg0, var, cc, \
+		    __CLOBBERS_MEM(), [val] vcon (_val))
+
+#define GEN_BINARY_RMWcc_5(op, var, cc, vcon, val) \
+	GEN_BINARY_RMWcc_6(op, var, cc, vcon, val, "%[var]")
+
+#define GEN_BINARY_RMWcc(X...) RMWcc_CONCAT(GEN_BINARY_RMWcc_, RMWcc_ARGS(X))(X)
+
+#define GEN_UNARY_SUFFIXED_RMWcc(op, suffix, var, cc, clobbers...) \
+	__GEN_RMWcc(op " %[var]\n\t" suffix, var, cc, \
+		    __CLOBBERS_MEM(clobbers))
 
-#define GEN_BINARY_SUFFIXED_RMWcc(op, suffix, var, vcon, val, arg0, cc, \
-				  clobbers...) \
-	__GEN_RMWcc(op __BINARY_RMWcc_ARG arg0 "\n\t" suffix, var, cc, \
-		    __CLOBBERS_MEM(clobbers), vcon (val))
+#define GEN_BINARY_SUFFIXED_RMWcc(op, suffix, var, cc, vcon, _val, clobbers...)\
+	__GEN_RMWcc(op " %[val], %[var]\n\t" suffix, var, cc, \
+		    __CLOBBERS_MEM(clobbers), [val] vcon (_val))
 
 #endif /* _ASM_X86_RMWcc */