Commit 30681358 authored by Sami Tolvanen, committed by Yang Yingliang

arm64: lse: fix LSE atomics with LLVM's integrated assembler

stable inclusion
from linux-4.19.164
commit 5681cce44cf41dc3eed8fd62b9e299bb57d1cb7b

--------------------------------

commit e0d5896b upstream.

Unlike gcc, clang considers each inline assembly block to be independent
and therefore, when using the integrated assembler for inline assembly,
any preambles that enable features must be repeated in each block.

This change defines __LSE_PREAMBLE and adds it to each inline assembly
block that contains LSE instructions, which allows them to be compiled
with clang's assembler as well.
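
(Illustrative sketch, not part of the patch: the helper below and its plain
int counter are invented for this note; the kernel generates the real
variants through macros. It only shows the pattern the change applies,
with __LSE_PREAMBLE emitted inside the asm block that uses an LSE
instruction, so clang's integrated assembler sees the .arch directive each
time it assembles that block.)

  #define __LSE_PREAMBLE	".arch armv8-a+lse\n"

  /* Hypothetical standalone helper; STADD is an LSE instruction, so the
   * preamble must appear in this very asm block when clang's integrated
   * assembler is used (e.g. clang --target=aarch64-linux-gnu -O2 -c). */
  static inline void lse_atomic_add(int i, int *counter)
  {
  	asm volatile(
  	__LSE_PREAMBLE			/* repeated in every LSE asm block */
  	"	stadd	%w[i], %[v]\n"
  	: [i] "+r" (i), [v] "+Q" (*counter)
  	: "r" (counter));
  }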

Link: https://github.com/ClangBuiltLinux/linux/issues/671
Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
Tested-by: Andrew Murray <andrew.murray@arm.com>
Tested-by: Kees Cook <keescook@chromium.org>
Reviewed-by: Andrew Murray <andrew.murray@arm.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
[nd: backport adjusted due to missing:
  commit addfc386 ("arm64: atomics: avoid out-of-line ll/sc atomics")]
Signed-off-by: Nick Desaulniers <ndesaulniers@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
Parent 4d91e0ea
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -32,7 +32,9 @@ static inline void atomic_##op(int i, atomic_t *v)		\
 	register int w0 asm ("w0") = i;					\
 	register atomic_t *x1 asm ("x1") = v;				\
 									\
-	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(op),		\
+	asm volatile(							\
+	__LSE_PREAMBLE							\
+	ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(op),			\
 "	" #asm_op "	%w[i], %[v]\n")					\
 	: [i] "+r" (w0), [v] "+Q" (v->counter)				\
 	: "r" (x1)							\
@@ -52,7 +54,9 @@ static inline int atomic_fetch_##op##name(int i, atomic_t *v)	\
 	register int w0 asm ("w0") = i;					\
 	register atomic_t *x1 asm ("x1") = v;				\
 									\
-	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
+	asm volatile(							\
+	__LSE_PREAMBLE							\
+	ARM64_LSE_ATOMIC_INSN(						\
 	/* LL/SC */							\
 	__LL_SC_ATOMIC(fetch_##op##name),				\
 	/* LSE atomics */						\
@@ -84,7 +88,9 @@ static inline int atomic_add_return##name(int i, atomic_t *v)	\
 	register int w0 asm ("w0") = i;					\
 	register atomic_t *x1 asm ("x1") = v;				\
 									\
-	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
+	asm volatile(							\
+	__LSE_PREAMBLE							\
+	ARM64_LSE_ATOMIC_INSN(						\
 	/* LL/SC */							\
 	__LL_SC_ATOMIC(add_return##name)				\
 	__nops(1),							\
@@ -110,7 +116,9 @@ static inline void atomic_and(int i, atomic_t *v)
 	register int w0 asm ("w0") = i;
 	register atomic_t *x1 asm ("x1") = v;
 
-	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	asm volatile(
+	__LSE_PREAMBLE
+	ARM64_LSE_ATOMIC_INSN(
 	/* LL/SC */
 	__LL_SC_ATOMIC(and)
 	__nops(1),
@@ -128,7 +136,9 @@ static inline int atomic_fetch_and##name(int i, atomic_t *v)	\
 	register int w0 asm ("w0") = i;					\
 	register atomic_t *x1 asm ("x1") = v;				\
 									\
-	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
+	asm volatile(							\
+	__LSE_PREAMBLE							\
+	ARM64_LSE_ATOMIC_INSN(						\
 	/* LL/SC */							\
 	__LL_SC_ATOMIC(fetch_and##name)					\
 	__nops(1),							\
@@ -154,7 +164,9 @@ static inline void atomic_sub(int i, atomic_t *v)
 	register int w0 asm ("w0") = i;
 	register atomic_t *x1 asm ("x1") = v;
 
-	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	asm volatile(
+	__LSE_PREAMBLE
+	ARM64_LSE_ATOMIC_INSN(
 	/* LL/SC */
 	__LL_SC_ATOMIC(sub)
 	__nops(1),
@@ -172,7 +184,9 @@ static inline int atomic_sub_return##name(int i, atomic_t *v)	\
 	register int w0 asm ("w0") = i;					\
 	register atomic_t *x1 asm ("x1") = v;				\
 									\
-	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
+	asm volatile(							\
+	__LSE_PREAMBLE							\
+	ARM64_LSE_ATOMIC_INSN(						\
 	/* LL/SC */							\
 	__LL_SC_ATOMIC(sub_return##name)				\
 	__nops(2),							\
@@ -200,7 +214,9 @@ static inline int atomic_fetch_sub##name(int i, atomic_t *v)	\
 	register int w0 asm ("w0") = i;					\
 	register atomic_t *x1 asm ("x1") = v;				\
 									\
-	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
+	asm volatile(							\
+	__LSE_PREAMBLE							\
+	ARM64_LSE_ATOMIC_INSN(						\
 	/* LL/SC */							\
 	__LL_SC_ATOMIC(fetch_sub##name)					\
 	__nops(1),							\
@@ -229,7 +245,9 @@ static inline void atomic64_##op(long i, atomic64_t *v)	\
 	register long x0 asm ("x0") = i;				\
 	register atomic64_t *x1 asm ("x1") = v;				\
 									\
-	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op),	\
+	asm volatile(							\
+	__LSE_PREAMBLE							\
+	ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op),			\
 "	" #asm_op "	%[i], %[v]\n")					\
 	: [i] "+r" (x0), [v] "+Q" (v->counter)				\
 	: "r" (x1)							\
@@ -249,7 +267,9 @@ static inline long atomic64_fetch_##op##name(long i, atomic64_t *v)	\
 	register long x0 asm ("x0") = i;				\
 	register atomic64_t *x1 asm ("x1") = v;				\
 									\
-	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
+	asm volatile(							\
+	__LSE_PREAMBLE							\
+	ARM64_LSE_ATOMIC_INSN(						\
 	/* LL/SC */							\
 	__LL_SC_ATOMIC64(fetch_##op##name),				\
 	/* LSE atomics */						\
@@ -281,7 +301,9 @@ static inline long atomic64_add_return##name(long i, atomic64_t *v)	\
 	register long x0 asm ("x0") = i;				\
 	register atomic64_t *x1 asm ("x1") = v;				\
 									\
-	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
+	asm volatile(							\
+	__LSE_PREAMBLE							\
+	ARM64_LSE_ATOMIC_INSN(						\
 	/* LL/SC */							\
 	__LL_SC_ATOMIC64(add_return##name)				\
 	__nops(1),							\
@@ -307,7 +329,9 @@ static inline void atomic64_and(long i, atomic64_t *v)
 	register long x0 asm ("x0") = i;
 	register atomic64_t *x1 asm ("x1") = v;
 
-	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	asm volatile(
+	__LSE_PREAMBLE
+	ARM64_LSE_ATOMIC_INSN(
 	/* LL/SC */
 	__LL_SC_ATOMIC64(and)
 	__nops(1),
@@ -325,7 +349,9 @@ static inline long atomic64_fetch_and##name(long i, atomic64_t *v)	\
 	register long x0 asm ("x0") = i;				\
 	register atomic64_t *x1 asm ("x1") = v;				\
 									\
-	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
+	asm volatile(							\
+	__LSE_PREAMBLE							\
+	ARM64_LSE_ATOMIC_INSN(						\
 	/* LL/SC */							\
 	__LL_SC_ATOMIC64(fetch_and##name)				\
 	__nops(1),							\
@@ -351,7 +377,9 @@ static inline void atomic64_sub(long i, atomic64_t *v)
 	register long x0 asm ("x0") = i;
 	register atomic64_t *x1 asm ("x1") = v;
 
-	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	asm volatile(
+	__LSE_PREAMBLE
+	ARM64_LSE_ATOMIC_INSN(
 	/* LL/SC */
 	__LL_SC_ATOMIC64(sub)
 	__nops(1),
@@ -369,7 +397,9 @@ static inline long atomic64_sub_return##name(long i, atomic64_t *v)	\
 	register long x0 asm ("x0") = i;				\
 	register atomic64_t *x1 asm ("x1") = v;				\
 									\
-	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
+	asm volatile(							\
+	__LSE_PREAMBLE							\
+	ARM64_LSE_ATOMIC_INSN(						\
 	/* LL/SC */							\
 	__LL_SC_ATOMIC64(sub_return##name)				\
 	__nops(2),							\
@@ -397,7 +427,9 @@ static inline long atomic64_fetch_sub##name(long i, atomic64_t *v)	\
 	register long x0 asm ("x0") = i;				\
 	register atomic64_t *x1 asm ("x1") = v;				\
 									\
-	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
+	asm volatile(							\
+	__LSE_PREAMBLE							\
+	ARM64_LSE_ATOMIC_INSN(						\
 	/* LL/SC */							\
 	__LL_SC_ATOMIC64(fetch_sub##name)				\
 	__nops(1),							\
@@ -422,7 +454,9 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
 {
 	register long x0 asm ("x0") = (long)v;
 
-	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	asm volatile(
+	__LSE_PREAMBLE
+	ARM64_LSE_ATOMIC_INSN(
 	/* LL/SC */
 	__LL_SC_ATOMIC64(dec_if_positive)
 	__nops(6),
@@ -455,7 +489,9 @@ static inline unsigned long __cmpxchg_case_##name(volatile void *ptr,	\
 	register unsigned long x1 asm ("x1") = old;			\
 	register unsigned long x2 asm ("x2") = new;			\
 									\
-	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
+	asm volatile(							\
+	__LSE_PREAMBLE							\
+	ARM64_LSE_ATOMIC_INSN(						\
 	/* LL/SC */							\
 	__LL_SC_CMPXCHG(name)						\
 	__nops(2),							\
@@ -507,7 +543,9 @@ static inline long __cmpxchg_double##name(unsigned long old1,	\
 	register unsigned long x3 asm ("x3") = new2;			\
 	register unsigned long x4 asm ("x4") = (unsigned long)ptr;	\
 									\
-	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
+	asm volatile(							\
+	__LSE_PREAMBLE							\
+	ARM64_LSE_ATOMIC_INSN(						\
 	/* LL/SC */							\
 	__LL_SC_CMPXCHG_DBL(name)					\
 	__nops(3),							\
--- a/arch/arm64/include/asm/lse.h
+++ b/arch/arm64/include/asm/lse.h
@@ -4,6 +4,8 @@
 
 #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
 
+#define __LSE_PREAMBLE	".arch armv8-a+lse\n"
+
 #include <linux/compiler_types.h>
 #include <linux/export.h>
 #include <linux/stringify.h>
@@ -20,8 +22,6 @@
 
 #else	/* __ASSEMBLER__ */
 
-__asm__(".arch_extension	lse");
-
 /* Move the ll/sc atomics out-of-line */
 #define __LL_SC_INLINE		notrace
 #define __LL_SC_PREFIX(x)	__ll_sc_##x
@@ -33,7 +33,7 @@ __asm__(".arch_extension	lse");
 /* In-line patching at runtime */
 #define ARM64_LSE_ATOMIC_INSN(llsc, lse)				\
-	ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS)
+	ALTERNATIVE(llsc, __LSE_PREAMBLE lse, ARM64_HAS_LSE_ATOMICS)
 
 #endif	/* __ASSEMBLER__ */
 
 #else	/* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */