Commit 476758c6 authored by Bibo Mao, committed by openeuler-sync-bot

LoongArch: refresh usage of sync

LoongArch inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I6BWFP

--------------------------------

A sync is currently executed as the read barrier for the ll/sc pair, but only
the ll side actually requires the read barrier; the sc side does not. This
patch branches over the barrier on the path where sc completes, so the barrier
is only executed on the early-exit path that runs ll alone.

Change-Id: Ib8a42f50d494e3e80cda0843d069668604b486e6
Signed-off-by: Bibo Mao <maobibo@loongson.cn>
(cherry picked from commit d8911a28)
Parent 4416d3e0
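For context, a minimal self-contained sketch of the LL/SC pattern that the hunks below produce (LoongArch GCC inline assembly; the function name, the plain int * operand, and the literal "dbar 0" standing in for __WEAK_LLSC_MB are illustrative assumptions, not taken from the patch):

/*
 * Illustrative sketch only -- not part of the patch. It mirrors the
 * arch_atomic_sub_if_positive() loop after this change: the barrier is
 * reached only through the "bltz" early exit, i.e. when ll.w ran but no
 * sc.w completed; a completed ll.w/sc.w pair needs no extra barrier, so
 * the success path branches over it.
 */
static inline int sub_if_positive_sketch(int i, int *counter)
{
	int result, temp;

	__asm__ __volatile__(
	"1:	ll.w	%1, %2		\n"	/* load-linked current value     */
	"	sub.w	%0, %1, %3	\n"	/* compute the decremented value */
	"	move	%1, %0		\n"
	"	bltz	%0, 2f		\n"	/* would drop below zero: bail   */
	"	sc.w	%1, %2		\n"	/* store-conditional new value   */
	"	beqz	%1, 1b		\n"	/* sc.w failed: retry the loop   */
	"	b	3f		\n"	/* sc.w completed: skip barrier  */
	"2:				\n"
	"	dbar	0		\n"	/* assumed __WEAK_LLSC_MB expansion */
	"3:				\n"
	: "=&r" (result), "=&r" (temp), "+ZC" (*counter)
	: "r" (i)
	: "memory");

	return result;
}

The same one-instruction skip ("b 3f", or "b 5f" in the futex helper) is what each hunk below adds at its LL/SC site.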
@@ -162,8 +162,10 @@ static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
 		"	bltz	%0, 2f				\n"
 		"	sc.w	%1, %2				\n"
 		"	beqz	%1, 1b				\n"
+		"	b	3f				\n"
 		"2:						\n"
 		__WEAK_LLSC_MB
+		"3:						\n"
 		: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
 		: "I" (-i));
 	} else {
@@ -174,8 +176,10 @@ static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
 		"	bltz	%0, 2f				\n"
 		"	sc.w	%1, %2				\n"
 		"	beqz	%1, 1b				\n"
+		"	b	3f				\n"
 		"2:						\n"
 		__WEAK_LLSC_MB
+		"3:						\n"
 		: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
 		: "r" (i));
 	}
@@ -323,8 +327,10 @@ static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v)
 		"	bltz	%0, 2f				\n"
 		"	sc.d	%1, %2				\n"
 		"	beqz	%1, 1b				\n"
+		"	b	3f				\n"
 		"2:						\n"
 		__WEAK_LLSC_MB
+		"3:						\n"
 		: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
 		: "I" (-i));
 	} else {
@@ -335,8 +341,10 @@ static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v)
 		"	bltz	%0, 2f				\n"
 		"	sc.d	%1, %2				\n"
 		"	beqz	%1, 1b				\n"
+		"	b	3f				\n"
 		"2:						\n"
 		__WEAK_LLSC_MB
+		"3:						\n"
 		: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
 		: "r" (i));
 	}
...
@@ -102,8 +102,10 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
 	"	move	$t0, %z4			\n"	\
 	"	" st "	$t0, %1				\n"	\
 	"	beqz	$t0, 1b				\n"	\
+	"	b	3f				\n"	\
 	"2:						\n"	\
 	__WEAK_LLSC_MB					\
+	"3:						\n"	\
 	: "=&r" (__ret), "=ZB"(*m)			\
 	: "ZB"(*m), "Jr" (old), "Jr" (new)		\
 	: "t0", "memory");				\
...
@@ -84,8 +84,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newv
 	"	move	$t0, %z5			\n"
 	"2:	sc.w	$t0, %2				\n"
 	"	beqz	$t0, 1b				\n"
+	"	b	5f				\n"
 	"3:						\n"
 	__WEAK_LLSC_MB
+	"5:						\n"
 	"	.section .fixup,\"ax\"			\n"
 	"4:	li.d	%0, %6				\n"
 	"	b	3b				\n"
...
@@ -298,9 +298,10 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
 		"	or	%[tmp], %[tmp], %[global]	\n"
 		__SC	"%[tmp], %[buddy]			\n"
 		"	beqz	%[tmp], 1b			\n"
-		"	nop					\n"
+		"	b	3f				\n"
 		"2:						\n"
 		__WEAK_LLSC_MB
+		"3:						\n"
 		: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
 		: [global] "r" (page_global));
 #else /* !CONFIG_SMP */
...