Commit 27606268 authored by Mark Rutland, committed by sanglipeng

arm64: atomics: format whitespace consistently

stable inclusion
from stable-v5.10.164
commit 28840e46eaafa049d10d525632eebccecc6d3f83
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I7T7G4

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=28840e46eaafa049d10d525632eebccecc6d3f83

--------------------------------

[ Upstream commit 8e6082e9 ]

The code for the atomic ops is formatted inconsistently, and while this
is not a functional problem it is rather distracting when working on
them.

Some ops have consistent indentation, e.g.

| #define ATOMIC_OP_ADD_RETURN(name, mb, cl...)                           \
| static inline int __lse_atomic_add_return##name(int i, atomic_t *v)     \
| {                                                                       \
|         u32 tmp;                                                        \
|                                                                         \
|         asm volatile(                                                   \
|         __LSE_PREAMBLE                                                  \
|         "       ldadd" #mb "    %w[i], %w[tmp], %[v]\n"                 \
|         "       add     %w[i], %w[i], %w[tmp]"                          \
|         : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)        \
|         : "r" (v)                                                       \
|         : cl);                                                          \
|                                                                         \
|         return i;                                                       \
| }
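
As an aside, each such macro is stamped out once per ordering variant. As a rough sketch (assuming the usual _acquire instantiation with mb=a and a "memory" clobber, which is not quoted from this patch), the expansion looks like:

| /* Hypothetical expansion sketch of ATOMIC_OP_ADD_RETURN(_acquire, a, "memory");
|  * the invocation parameters are assumed, not taken from this patch. */
| static inline int __lse_atomic_add_return_acquire(int i, atomic_t *v)
| {
|         u32 tmp;
|
|         asm volatile(
|         __LSE_PREAMBLE
|         "       ldadda  %w[i], %w[tmp], %[v]\n"
|         "       add     %w[i], %w[i], %w[tmp]"
|         : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)
|         : "r" (v)
|         : "memory");
|
|         return i;
| }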

While others have negative indentation for some lines, and/or have
misaligned trailing backslashes, e.g.

| static inline void __lse_atomic_##op(int i, atomic_t *v)                        \
| {                                                                       \
|         asm volatile(                                                   \
|         __LSE_PREAMBLE                                                  \
| "       " #asm_op "     %w[i], %[v]\n"                                  \
|         : [i] "+r" (i), [v] "+Q" (v->counter)                           \
|         : "r" (v));                                                     \
| }

This patch makes the indentation consistent and also aligns the trailing
backslashes. This makes the code easier to read for those (like myself)
who are easily distracted by these inconsistencies.
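
For example, after this change the __lse_atomic_##op case above ends up formatted like the first example (see the corresponding hunk in atomic_lse.h below):

| static inline void __lse_atomic_##op(int i, atomic_t *v)               \
| {                                                                      \
|         asm volatile(                                                  \
|         __LSE_PREAMBLE                                                 \
|         "       " #asm_op "     %w[i], %[v]\n"                         \
|         : [i] "+r" (i), [v] "+Q" (v->counter)                          \
|         : "r" (v));                                                    \
| }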

This is intended as a cleanup.
There should be no functional change as a result of this patch.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will@kernel.org>
Acked-by: Will Deacon <will@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20211210151410.2782645-2-mark.rutland@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Stable-dep-of: 031af500 ("arm64: cmpxchg_double*: hazard against entire exchange variable")
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: sanglipeng <sanglipeng1@jd.com>
Parent 7c7fed2d
arch/arm64/include/asm/atomic_ll_sc.h

@@ -44,11 +44,11 @@ __ll_sc_atomic_##op(int i, atomic_t *v) \
                                                                        \
        asm volatile("// atomic_" #op "\n"                              \
        __LL_SC_FALLBACK(                                               \
        "       prfm    pstl1strm, %2\n"                                \
        "1:     ldxr    %w0, %2\n"                                      \
        "       " #asm_op "     %w0, %w0, %w3\n"                        \
        "       stxr    %w1, %w0, %2\n"                                 \
        "       cbnz    %w1, 1b\n")                                     \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : __stringify(constraint) "r" (i));                             \
}
@@ -62,12 +62,12 @@ __ll_sc_atomic_##op##_return##name(int i, atomic_t *v) \
                                                                        \
        asm volatile("// atomic_" #op "_return" #name "\n"             \
        __LL_SC_FALLBACK(                                               \
        "       prfm    pstl1strm, %2\n"                                \
        "1:     ld" #acq "xr    %w0, %2\n"                              \
        "       " #asm_op "     %w0, %w0, %w3\n"                        \
        "       st" #rel "xr    %w1, %w0, %2\n"                         \
        "       cbnz    %w1, 1b\n"                                      \
        "       " #mb )                                                 \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : __stringify(constraint) "r" (i)                               \
        : cl);                                                          \
@@ -84,12 +84,12 @@ __ll_sc_atomic_fetch_##op##name(int i, atomic_t *v) \
                                                                        \
        asm volatile("// atomic_fetch_" #op #name "\n"                  \
        __LL_SC_FALLBACK(                                               \
        "       prfm    pstl1strm, %3\n"                                \
        "1:     ld" #acq "xr    %w0, %3\n"                              \
        "       " #asm_op "     %w1, %w0, %w4\n"                        \
        "       st" #rel "xr    %w2, %w1, %3\n"                         \
        "       cbnz    %w2, 1b\n"                                      \
        "       " #mb )                                                 \
        : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)   \
        : __stringify(constraint) "r" (i)                               \
        : cl);                                                          \
@@ -143,11 +143,11 @@ __ll_sc_atomic64_##op(s64 i, atomic64_t *v) \
                                                                        \
        asm volatile("// atomic64_" #op "\n"                            \
        __LL_SC_FALLBACK(                                               \
        "       prfm    pstl1strm, %2\n"                                \
        "1:     ldxr    %0, %2\n"                                       \
        "       " #asm_op "     %0, %0, %3\n"                           \
        "       stxr    %w1, %0, %2\n"                                  \
        "       cbnz    %w1, 1b")                                       \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : __stringify(constraint) "r" (i));                             \
}
@@ -161,12 +161,12 @@ __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \
                                                                        \
        asm volatile("// atomic64_" #op "_return" #name "\n"           \
        __LL_SC_FALLBACK(                                               \
        "       prfm    pstl1strm, %2\n"                                \
        "1:     ld" #acq "xr    %0, %2\n"                               \
        "       " #asm_op "     %0, %0, %3\n"                           \
        "       st" #rel "xr    %w1, %0, %2\n"                          \
        "       cbnz    %w1, 1b\n"                                      \
        "       " #mb )                                                 \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : __stringify(constraint) "r" (i)                               \
        : cl);                                                          \
@@ -183,12 +183,12 @@ __ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \
                                                                        \
        asm volatile("// atomic64_fetch_" #op #name "\n"                \
        __LL_SC_FALLBACK(                                               \
        "       prfm    pstl1strm, %3\n"                                \
        "1:     ld" #acq "xr    %0, %3\n"                               \
        "       " #asm_op "     %1, %0, %4\n"                           \
        "       st" #rel "xr    %w2, %1, %3\n"                          \
        "       cbnz    %w2, 1b\n"                                      \
        "       " #mb )                                                 \
        : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)   \
        : __stringify(constraint) "r" (i)                               \
        : cl);                                                          \
@@ -241,14 +241,14 @@ __ll_sc_atomic64_dec_if_positive(atomic64_t *v)
        asm volatile("// atomic64_dec_if_positive\n"
        __LL_SC_FALLBACK(
        "       prfm    pstl1strm, %2\n"
        "1:     ldxr    %0, %2\n"
        "       subs    %0, %0, #1\n"
        "       b.lt    2f\n"
        "       stlxr   %w1, %0, %2\n"
        "       cbnz    %w1, 1b\n"
        "       dmb     ish\n"
        "2:")
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
        :
        : "cc", "memory");
...

arch/arm64/include/asm/atomic_lse.h

@@ -15,7 +15,7 @@ static inline void __lse_atomic_##op(int i, atomic_t *v) \
{                                                                       \
        asm volatile(                                                   \
        __LSE_PREAMBLE                                                  \
        "       " #asm_op "     %w[i], %[v]\n"                          \
        : [i] "+r" (i), [v] "+Q" (v->counter)                           \
        : "r" (v));                                                     \
}
@@ -32,7 +32,7 @@ static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v) \
{                                                                       \
        asm volatile(                                                   \
        __LSE_PREAMBLE                                                  \
        "       " #asm_op #mb " %w[i], %w[i], %[v]"                     \
        : [i] "+r" (i), [v] "+Q" (v->counter)                           \
        : "r" (v)                                                       \
        : cl);                                                          \
@@ -168,7 +168,7 @@ static inline void __lse_atomic64_##op(s64 i, atomic64_t *v) \
{                                                                       \
        asm volatile(                                                   \
        __LSE_PREAMBLE                                                  \
        "       " #asm_op "     %[i], %[v]\n"                           \
        : [i] "+r" (i), [v] "+Q" (v->counter)                           \
        : "r" (v));                                                     \
}
@@ -185,7 +185,7 @@ static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\
{                                                                       \
        asm volatile(                                                   \
        __LSE_PREAMBLE                                                  \
        "       " #asm_op #mb " %[i], %[i], %[v]"                       \
        : [i] "+r" (i), [v] "+Q" (v->counter)                           \
        : "r" (v)                                                       \
        : cl);                                                          \
@@ -272,7 +272,7 @@ static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
}
#define ATOMIC64_OP_SUB_RETURN(name, mb, cl...)                         \
static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)\
{                                                                       \
        unsigned long tmp;                                              \
                                                                        \
...
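
For context (not part of this patch), these generated helpers are not called directly; the generic atomic API dispatches to either the __lse_* or the __ll_sc_* backend depending on whether the CPU implements LSE atomics. A minimal, hypothetical caller sketch:

| #include <linux/atomic.h>
|
| static atomic_t refcnt = ATOMIC_INIT(0);
|
| /* Hypothetical example: atomic_add_return() eventually lands in
|  * __lse_atomic_add_return() or __ll_sc_atomic_add_return(). */
| static int take_ref(void)
| {
|         return atomic_add_return(1, &refcnt);
| }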