From a906dfaa9961a67f0ac139e4d3bb3e71422103fe Mon Sep 17 00:00:00 2001
From: ou-yangkan <519689417@qq.com>
Date: Fri, 14 Jan 2022 14:52:58 +0800
Subject: [PATCH] fix atomic compile error of iar
Signed-off-by: wang-shulin93 <15173259956@163.com>
---
arch/arm/cortex-m3/keil/los_arch_atomic.h | 152 ------------------
arch/arm/cortex-m3/keil/los_atomic.S | 98 +++++++++++
arch/arm/cortex-m33/gcc/NTZ/los_arch_atomic.h | 52 +++---
.../gcc/TZ/non_secure/los_arch_atomic.h | 52 +++---
arch/arm/cortex-m33/iar/NTZ/los_arch_atomic.h | 90 +++++------
.../iar/TZ/non_secure/los_arch_atomic.h | 90 +++++------
arch/arm/cortex-m4/gcc/los_arch_atomic.h | 52 +++---
arch/arm/cortex-m4/iar/los_arch_atomic.h | 90 +++++------
arch/arm/cortex-m7/gcc/los_arch_atomic.h | 52 +++---
arch/arm/cortex-m7/iar/los_arch_atomic.h | 90 +++++------
arch/csky/v2/gcc/los_arch_atomic.h | 10 +-
arch/risc-v/nuclei/gcc/los_arch_atomic.h | 8 +-
arch/risc-v/riscv32/gcc/los_arch_atomic.h | 8 +-
arch/xtensa/lx6/gcc/los_arch_atomic.h | 78 ++++-----
14 files changed, 395 insertions(+), 527 deletions(-)
create mode 100755 arch/arm/cortex-m3/keil/los_atomic.S
diff --git a/arch/arm/cortex-m3/keil/los_arch_atomic.h b/arch/arm/cortex-m3/keil/los_arch_atomic.h
index d5112ab6..6327fd2d 100644
--- a/arch/arm/cortex-m3/keil/los_arch_atomic.h
+++ b/arch/arm/cortex-m3/keil/los_arch_atomic.h
@@ -40,73 +40,6 @@ extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
-STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
-{
- INT32 val;
- UINT32 intSave;
-
- intSave = LOS_IntLock();
-
- __asm__ __volatile__("ldrex %0, [%1]\n"
- : "=&r"(val)
- : "r"(v)
- : "cc");
- LOS_IntRestore(intSave);
-
- return val;
-}
-
-STATIC INLINE VOID ArchAtomicSet(Atomic *v, INT32 setVal)
-{
- UINT32 status;
- UINT32 intSave;
-
- intSave = LOS_IntLock();
-
- __asm__ __volatile__("1:ldrex %0, [%1]\n"
- " strex %0, %2, [%1]\n"
- " teq %0, #0\n"
- " bne 1b"
- : "=&r"(status)
- : "r"(v), "r"(setVal)
- : "cc");
- LOS_IntRestore(intSave);
-}
-
-STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
-{
- INT32 val;
- UINT32 status;
-
- do {
- __asm__ __volatile__("ldrex %1, [%2]\n"
- "add %1, %1, %3\n"
- "strex %0, %1, [%2]"
- : "=&r"(status), "=&r"(val)
- : "r"(v), "r"(addVal)
- : "cc");
- } while (__builtin_expect(status != 0, 0));
-
- return val;
-}
-
-STATIC INLINE INT32 ArchAtomicSub(Atomic *v, INT32 subVal)
-{
- INT32 val;
- UINT32 status;
-
- do {
- __asm__ __volatile__("ldrex %1, [%2]\n"
- "sub %1, %1, %3\n"
- "strex %0, %1, [%2]"
- : "=&r"(status), "=&r"(val)
- : "r"(v), "r"(subVal)
- : "cc");
- } while (__builtin_expect(status != 0, 0));
-
- return val;
-}
-
STATIC INLINE VOID ArchAtomicInc(Atomic *v)
{
(VOID)ArchAtomicAdd(v, 1);
@@ -127,91 +60,6 @@ STATIC INLINE INT32 ArchAtomicDecRet(Atomic *v)
return ArchAtomicSub(v, 1);
}
-/**
- * @ingroup los_arch_atomic
- * @brief Atomic exchange for 32-bit variable.
- *
- * @par Description:
- * This API is used to implement the atomic exchange for 32-bit variable
- * and return the previous value of the atomic variable.
- * @attention
- *
- * The pointer v must not be NULL.
- *
- * @param v [IN] The variable pointer.
- * @param val [IN] The exchange value.
- *
- * @retval #INT32 The previous value of the atomic variable
- * @par Dependency:
- * - los_arch_atomic.h: the header file that contains the API declaration.
- * @see
- */
-STATIC INLINE INT32 ArchAtomicXchg32bits(volatile INT32 *v, INT32 val)
-{
- INT32 prevVal = 0;
- UINT32 status = 0;
-
- do {
- __asm__ __volatile__("ldrex %0, [%3]\n"
- "strex %1, %4, [%3]"
- : "=&r"(prevVal), "=&r"(status), "+m"(*v)
- : "r"(v), "r"(val)
- : "cc");
- } while (__builtin_expect(status != 0, 0));
-
- return prevVal;
-}
-
-/**
- * @ingroup los_arch_atomic
- * @brief Atomic exchange for 32-bit variable with compare.
- *
- * @par Description:
- * This API is used to implement the atomic exchange for 32-bit variable, if the value of variable is equal to oldVal.
- * @attention
- * The pointer v must not be NULL.
- *
- * @param v [IN] The variable pointer.
- * @param val [IN] The new value.
- * @param oldVal [IN] The old value.
- *
- * @retval TRUE The previous value of the atomic variable is not equal to oldVal.
- * @retval FALSE The previous value of the atomic variable is equal to oldVal.
- * @par Dependency:
- * - los_arch_atomic.h: the header file that contains the API declaration.
- * @see
- */
-STATIC INLINE BOOL ArchAtomicCmpXchg32bits(volatile INT32 *v, INT32 val, INT32 oldVal)
-{
- INT32 prevVal = 0;
- UINT32 status = 0;
-
- do {
- __asm__ __volatile__("1: ldrex %0, %2\n"
- " mov %1, #0\n"
- " cmp %0, %3\n"
- " bne 2f\n"
- " strex %1, %4, %2\n"
- "2:"
- : "=&r"(prevVal), "=&r"(status), "+Q"(*v)
- : "r"(oldVal), "r"(val)
- : "cc");
- } while (__builtin_expect(status != 0, 0));
-
- return prevVal != oldVal;
-}
-
-STATIC INLINE INT64 ArchAtomic64Read(const Atomic64 *v)
-{
- INT64 val;
- UINT32 intSave;
-
- intSave = LOS_IntLock();
- val = *v;
- LOS_IntRestore(intSave);
-
- return val;
-}
-
STATIC INLINE VOID ArchAtomic64Set(Atomic64 *v, INT64 setVal)
{
UINT32 intSave;
diff --git a/arch/arm/cortex-m3/keil/los_atomic.S b/arch/arm/cortex-m3/keil/los_atomic.S
new file mode 100755
index 00000000..8d158c57
--- /dev/null
+++ b/arch/arm/cortex-m3/keil/los_atomic.S
@@ -0,0 +1,98 @@
+;
+; Copyright (c) 2022-2022 Huawei Device Co., Ltd. All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without modification,
+; are permitted provided that the following conditions are met:
+;
+; 1. Redistributions of source code must retain the above copyright notice, this list of
+; conditions and the following disclaimer.
+;
+; 2. Redistributions in binary form must reproduce the above copyright notice, this list
+; of conditions and the following disclaimer in the documentation and/or other materials
+; provided with the distribution.
+;
+; 3. Neither the name of the copyright holder nor the names of its contributors may be used
+; to endorse or promote products derived from this software without specific prior written
+; permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+; THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+; PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+; WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+; OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+; ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;
+
+;
+; Exported atomic primitives for Cortex-M3 (ARMv7-M), implemented with ldrex/strex.
+; BUGFIX: the EXPORT/PRESERVE8/AREA/THUMB directive group was duplicated verbatim;
+; a single copy is sufficient (duplicate EXPORTs are redundant at best).
+;
+ EXPORT ArchAtomicRead
+ EXPORT ArchAtomicSet
+ EXPORT ArchAtomicAdd
+ EXPORT ArchAtomicSub
+ EXPORT ArchAtomicXchg32bits
+ EXPORT ArchAtomicCmpXchg32bits
+
+ PRESERVE8
+ AREA |.text|, CODE, READONLY
+ THUMB
+
+ArchAtomicRead                     ; INT32 ArchAtomicRead(const Atomic *v): r0 = v, returns *v in r0
+ ldrex r1, [r0]                    ; exclusive load of *v (NOTE(review): leaves the exclusive monitor open; a plain ldr would suffice for a read -- confirm)
+ mov r0, r1                        ; return loaded value per AAPCS (result in r0)
+ bx lr
+
+ArchAtomicSet                      ; VOID ArchAtomicSet(Atomic *v, INT32 setVal): r0 = v, r1 = setVal
+ ldrex r2, [r0]                    ; claim exclusive access; the loaded value is deliberately discarded
+ strex r3, r1, [r0]                ; try to store setVal; r3 = 0 on success
+ teq r3, #0
+ bne ArchAtomicSet                 ; exclusivity lost: retry the whole ldrex/strex pair
+ bx lr
+
+ArchAtomicAdd                      ; INT32 ArchAtomicAdd(Atomic *v, INT32 addVal): r0 = v, r1 = addVal; returns the NEW value
+ ldrex r2, [r0]                    ; r2 = *v (exclusive load)
+ add r2, r2, r1                    ; r2 = *v + addVal
+ strex r3, r2, [r0]                ; attempt store; r3 = 0 on success
+ teq r3, #0
+ bne ArchAtomicAdd                 ; exclusivity lost: retry
+ mov r0, r2                        ; return the updated value
+ bx lr
+
+ArchAtomicSub                      ; INT32 ArchAtomicSub(Atomic *v, INT32 subVal): r0 = v, r1 = subVal; returns the NEW value
+ ldrex r2, [r0]                    ; r2 = *v (exclusive load)
+ sub r2, r2, r1                    ; r2 = *v - subVal
+ strex r3, r2, [r0]                ; attempt store; r3 = 0 on success
+ teq r3, #0
+ bne ArchAtomicSub                 ; exclusivity lost: retry
+ mov r0, r2                        ; return the updated value
+ bx lr
+
+ArchAtomicXchg32bits               ; INT32 ArchAtomicXchg32bits(volatile INT32 *v, INT32 val): r0 = v, r1 = val; returns previous *v
+ ldrex r2, [r0]                    ; r2 = previous value (exclusive load)
+ strex r3, r1, [r0]                ; try to store val; r3 = 0 on success
+ teq r3, #0
+ bne ArchAtomicXchg32bits          ; exclusivity lost: retry
+ mov r0, r2                        ; return the previous value
+ bx lr                             ; BUGFIX: return was missing -- control fell through into ArchAtomicCmpXchg32bits
+
+ArchAtomicCmpXchg32bits            ; BOOL ArchAtomicCmpXchg32bits(volatile INT32 *v, INT32 val, INT32 oldVal)
+                                   ; r0 = v, r1 = val, r2 = oldVal; returns FALSE (0) iff the exchange was performed
+ ldrex r3, [r0]                    ; r3 = prevVal (exclusive load)
+ cmp r3, r2
+ bne end                           ; prevVal != oldVal: do not store
+ strex r12, r1, [r0]               ; BUGFIX: use caller-saved r12 (ip); original clobbered callee-saved r4 without saving it (AAPCS violation)
+ teq r12, #0
+ bne ArchAtomicCmpXchg32bits       ; exclusivity lost: retry
+end
+ clrex                             ; drop any still-open exclusive reservation (mismatch path)
+ cmp r3, r2                        ; BUGFIX: compute the BOOL result; original returned nothing and ran off the end of the section
+ ite ne
+ movne r0, #1                      ; TRUE: values differed, no exchange happened
+ moveq r0, #0                      ; FALSE: exchange performed
+ bx lr
+
+ END                               ; BUGFIX: armasm requires END to terminate the source file; it was missing entirely
diff --git a/arch/arm/cortex-m33/gcc/NTZ/los_arch_atomic.h b/arch/arm/cortex-m33/gcc/NTZ/los_arch_atomic.h
index d5112ab6..d4582cbd 100755
--- a/arch/arm/cortex-m33/gcc/NTZ/los_arch_atomic.h
+++ b/arch/arm/cortex-m33/gcc/NTZ/los_arch_atomic.h
@@ -43,15 +43,11 @@ extern "C" {
STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
{
INT32 val;
- UINT32 intSave;
-
- intSave = LOS_IntLock();
- __asm__ __volatile__("ldrex %0, [%1]\n"
+ __asm__ __volatile__("ldrex %0, [%1]\n"
: "=&r"(val)
: "r"(v)
: "cc");
- LOS_IntRestore(intSave);
return val;
}
@@ -59,18 +55,14 @@ STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
STATIC INLINE VOID ArchAtomicSet(Atomic *v, INT32 setVal)
{
UINT32 status;
- UINT32 intSave;
-
- intSave = LOS_IntLock();
- __asm__ __volatile__("1:ldrex %0, [%1]\n"
- " strex %0, %2, [%1]\n"
- " teq %0, #0\n"
- " bne 1b"
- : "=&r"(status)
- : "r"(v), "r"(setVal)
- : "cc");
- LOS_IntRestore(intSave);
+ do {
+ __asm__ __volatile__("ldrex %0, [%1]\n"
+ "strex %0, %2, [%1]\n"
+ : "=&r"(status)
+ : "r"(v), "r"(setVal)
+ : "cc");
+ } while (__builtin_expect(status != 0, 0));
}
STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
@@ -79,9 +71,9 @@ STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
UINT32 status;
do {
- __asm__ __volatile__("ldrex %1, [%2]\n"
- "add %1, %1, %3\n"
- "strex %0, %1, [%2]"
+ __asm__ __volatile__("ldrex %1, [%2]\n"
+ "add %1, %1, %3\n"
+ "strex %0, %1, [%2]"
: "=&r"(status), "=&r"(val)
: "r"(v), "r"(addVal)
: "cc");
@@ -96,9 +88,9 @@ STATIC INLINE INT32 ArchAtomicSub(Atomic *v, INT32 subVal)
UINT32 status;
do {
- __asm__ __volatile__("ldrex %1, [%2]\n"
- "sub %1, %1, %3\n"
- "strex %0, %1, [%2]"
+ __asm__ __volatile__("ldrex %1, [%2]\n"
+ "sub %1, %1, %3\n"
+ "strex %0, %1, [%2]"
: "=&r"(status), "=&r"(val)
: "r"(v), "r"(subVal)
: "cc");
@@ -151,8 +143,8 @@ STATIC INLINE INT32 ArchAtomicXchg32bits(volatile INT32 *v, INT32 val)
UINT32 status = 0;
do {
- __asm__ __volatile__("ldrex %0, [%3]\n"
- "strex %1, %4, [%3]"
+ __asm__ __volatile__("ldrex %0, [%3]\n"
+ "strex %1, %4, [%3]"
: "=&r"(prevVal), "=&r"(status), "+m"(*v)
: "r"(v), "r"(val)
: "cc");
@@ -186,12 +178,12 @@ STATIC INLINE BOOL ArchAtomicCmpXchg32bits(volatile INT32 *v, INT32 val, INT32 o
UINT32 status = 0;
do {
- __asm__ __volatile__("1: ldrex %0, %2\n"
- " mov %1, #0\n"
- " cmp %0, %3\n"
- " bne 2f\n"
- " strex %1, %4, %2\n"
- "2:"
+ __asm__ __volatile__("ldrex %0, %2\n"
+ "mov %1, #0\n"
+ "cmp %0, %3\n"
+ "bne 1f\n"
+ "strex %1, %4, %2\n"
+ "1:"
: "=&r"(prevVal), "=&r"(status), "+Q"(*v)
: "r"(oldVal), "r"(val)
: "cc");
diff --git a/arch/arm/cortex-m33/gcc/TZ/non_secure/los_arch_atomic.h b/arch/arm/cortex-m33/gcc/TZ/non_secure/los_arch_atomic.h
index d5112ab6..d4582cbd 100755
--- a/arch/arm/cortex-m33/gcc/TZ/non_secure/los_arch_atomic.h
+++ b/arch/arm/cortex-m33/gcc/TZ/non_secure/los_arch_atomic.h
@@ -43,15 +43,11 @@ extern "C" {
STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
{
INT32 val;
- UINT32 intSave;
-
- intSave = LOS_IntLock();
- __asm__ __volatile__("ldrex %0, [%1]\n"
+ __asm__ __volatile__("ldrex %0, [%1]\n"
: "=&r"(val)
: "r"(v)
: "cc");
- LOS_IntRestore(intSave);
return val;
}
@@ -59,18 +55,14 @@ STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
STATIC INLINE VOID ArchAtomicSet(Atomic *v, INT32 setVal)
{
UINT32 status;
- UINT32 intSave;
-
- intSave = LOS_IntLock();
- __asm__ __volatile__("1:ldrex %0, [%1]\n"
- " strex %0, %2, [%1]\n"
- " teq %0, #0\n"
- " bne 1b"
- : "=&r"(status)
- : "r"(v), "r"(setVal)
- : "cc");
- LOS_IntRestore(intSave);
+ do {
+ __asm__ __volatile__("ldrex %0, [%1]\n"
+ "strex %0, %2, [%1]\n"
+ : "=&r"(status)
+ : "r"(v), "r"(setVal)
+ : "cc");
+ } while (__builtin_expect(status != 0, 0));
}
STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
@@ -79,9 +71,9 @@ STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
UINT32 status;
do {
- __asm__ __volatile__("ldrex %1, [%2]\n"
- "add %1, %1, %3\n"
- "strex %0, %1, [%2]"
+ __asm__ __volatile__("ldrex %1, [%2]\n"
+ "add %1, %1, %3\n"
+ "strex %0, %1, [%2]"
: "=&r"(status), "=&r"(val)
: "r"(v), "r"(addVal)
: "cc");
@@ -96,9 +88,9 @@ STATIC INLINE INT32 ArchAtomicSub(Atomic *v, INT32 subVal)
UINT32 status;
do {
- __asm__ __volatile__("ldrex %1, [%2]\n"
- "sub %1, %1, %3\n"
- "strex %0, %1, [%2]"
+ __asm__ __volatile__("ldrex %1, [%2]\n"
+ "sub %1, %1, %3\n"
+ "strex %0, %1, [%2]"
: "=&r"(status), "=&r"(val)
: "r"(v), "r"(subVal)
: "cc");
@@ -151,8 +143,8 @@ STATIC INLINE INT32 ArchAtomicXchg32bits(volatile INT32 *v, INT32 val)
UINT32 status = 0;
do {
- __asm__ __volatile__("ldrex %0, [%3]\n"
- "strex %1, %4, [%3]"
+ __asm__ __volatile__("ldrex %0, [%3]\n"
+ "strex %1, %4, [%3]"
: "=&r"(prevVal), "=&r"(status), "+m"(*v)
: "r"(v), "r"(val)
: "cc");
@@ -186,12 +178,12 @@ STATIC INLINE BOOL ArchAtomicCmpXchg32bits(volatile INT32 *v, INT32 val, INT32 o
UINT32 status = 0;
do {
- __asm__ __volatile__("1: ldrex %0, %2\n"
- " mov %1, #0\n"
- " cmp %0, %3\n"
- " bne 2f\n"
- " strex %1, %4, %2\n"
- "2:"
+ __asm__ __volatile__("ldrex %0, %2\n"
+ "mov %1, #0\n"
+ "cmp %0, %3\n"
+ "bne 1f\n"
+ "strex %1, %4, %2\n"
+ "1:"
: "=&r"(prevVal), "=&r"(status), "+Q"(*v)
: "r"(oldVal), "r"(val)
: "cc");
diff --git a/arch/arm/cortex-m33/iar/NTZ/los_arch_atomic.h b/arch/arm/cortex-m33/iar/NTZ/los_arch_atomic.h
index d5112ab6..68e34352 100644
--- a/arch/arm/cortex-m33/iar/NTZ/los_arch_atomic.h
+++ b/arch/arm/cortex-m33/iar/NTZ/los_arch_atomic.h
@@ -43,15 +43,11 @@ extern "C" {
STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
{
INT32 val;
- UINT32 intSave;
- intSave = LOS_IntLock();
-
- __asm__ __volatile__("ldrex %0, [%1]\n"
- : "=&r"(val)
- : "r"(v)
- : "cc");
- LOS_IntRestore(intSave);
+ asm volatile("ldrex %0, [%1]\n"
+ : "=&r"(val)
+ : "r"(v)
+ : "cc");
return val;
}
@@ -59,18 +55,14 @@ STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
STATIC INLINE VOID ArchAtomicSet(Atomic *v, INT32 setVal)
{
UINT32 status;
- UINT32 intSave;
- intSave = LOS_IntLock();
-
- __asm__ __volatile__("1:ldrex %0, [%1]\n"
- " strex %0, %2, [%1]\n"
- " teq %0, #0\n"
- " bne 1b"
- : "=&r"(status)
- : "r"(v), "r"(setVal)
- : "cc");
- LOS_IntRestore(intSave);
+ do {
+ asm volatile("ldrex %0, [%1]\n"
+ "strex %0, %2, [%1]\n"
+ : "=&r"(status)
+ : "r"(v), "r"(setVal)
+ : "cc");
+ } while (status != 0);
}
STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
@@ -79,13 +71,13 @@ STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
UINT32 status;
do {
- __asm__ __volatile__("ldrex %1, [%2]\n"
- "add %1, %1, %3\n"
- "strex %0, %1, [%2]"
- : "=&r"(status), "=&r"(val)
- : "r"(v), "r"(addVal)
- : "cc");
- } while (__builtin_expect(status != 0, 0));
+ asm volatile("ldrex %1, [%2]\n"
+ "add %1, %1, %3\n"
+ "strex %0, %1, [%2]"
+ : "=&r"(status), "=&r"(val)
+ : "r"(v), "r"(addVal)
+ : "cc");
+ } while (status != 0);
return val;
}
@@ -96,13 +88,13 @@ STATIC INLINE INT32 ArchAtomicSub(Atomic *v, INT32 subVal)
UINT32 status;
do {
- __asm__ __volatile__("ldrex %1, [%2]\n"
- "sub %1, %1, %3\n"
- "strex %0, %1, [%2]"
- : "=&r"(status), "=&r"(val)
- : "r"(v), "r"(subVal)
- : "cc");
- } while (__builtin_expect(status != 0, 0));
+ asm volatile("ldrex %1, [%2]\n"
+ "sub %1, %1, %3\n"
+ "strex %0, %1, [%2]"
+ : "=&r"(status), "=&r"(val)
+ : "r"(v), "r"(subVal)
+ : "cc");
+ } while (status != 0);
return val;
}
@@ -151,12 +143,12 @@ STATIC INLINE INT32 ArchAtomicXchg32bits(volatile INT32 *v, INT32 val)
UINT32 status = 0;
do {
- __asm__ __volatile__("ldrex %0, [%3]\n"
- "strex %1, %4, [%3]"
- : "=&r"(prevVal), "=&r"(status), "+m"(*v)
- : "r"(v), "r"(val)
- : "cc");
- } while (__builtin_expect(status != 0, 0));
+ asm volatile("ldrex %0, [%2]\n"
+ "strex %1, %3, [%2]"
+ : "=&r"(prevVal), "=&r"(status)
+ : "r"(v), "r"(val)
+ : "cc");
+ } while (status != 0);
return prevVal;
}
@@ -186,16 +178,16 @@ STATIC INLINE BOOL ArchAtomicCmpXchg32bits(volatile INT32 *v, INT32 val, INT32 o
UINT32 status = 0;
do {
- __asm__ __volatile__("1: ldrex %0, %2\n"
- " mov %1, #0\n"
- " cmp %0, %3\n"
- " bne 2f\n"
- " strex %1, %4, %2\n"
- "2:"
- : "=&r"(prevVal), "=&r"(status), "+Q"(*v)
- : "r"(oldVal), "r"(val)
- : "cc");
- } while (__builtin_expect(status != 0, 0));
+ asm volatile("ldrex %0, [%2]\n"
+ "mov %1, #0\n"
+ "cmp %0, %3\n"
+ "bne 1f\n"
+ "strex %1, %4, [%2]\n"
+ "1:"
+ : "=&r"(prevVal), "=&r"(status)
+ : "r"(v), "r"(oldVal), "r"(val)
+ : "cc");
+ } while (status != 0);
return prevVal != oldVal;
}
diff --git a/arch/arm/cortex-m33/iar/TZ/non_secure/los_arch_atomic.h b/arch/arm/cortex-m33/iar/TZ/non_secure/los_arch_atomic.h
index d5112ab6..68e34352 100644
--- a/arch/arm/cortex-m33/iar/TZ/non_secure/los_arch_atomic.h
+++ b/arch/arm/cortex-m33/iar/TZ/non_secure/los_arch_atomic.h
@@ -43,15 +43,11 @@ extern "C" {
STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
{
INT32 val;
- UINT32 intSave;
- intSave = LOS_IntLock();
-
- __asm__ __volatile__("ldrex %0, [%1]\n"
- : "=&r"(val)
- : "r"(v)
- : "cc");
- LOS_IntRestore(intSave);
+ asm volatile("ldrex %0, [%1]\n"
+ : "=&r"(val)
+ : "r"(v)
+ : "cc");
return val;
}
@@ -59,18 +55,14 @@ STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
STATIC INLINE VOID ArchAtomicSet(Atomic *v, INT32 setVal)
{
UINT32 status;
- UINT32 intSave;
- intSave = LOS_IntLock();
-
- __asm__ __volatile__("1:ldrex %0, [%1]\n"
- " strex %0, %2, [%1]\n"
- " teq %0, #0\n"
- " bne 1b"
- : "=&r"(status)
- : "r"(v), "r"(setVal)
- : "cc");
- LOS_IntRestore(intSave);
+ do {
+ asm volatile("ldrex %0, [%1]\n"
+ "strex %0, %2, [%1]\n"
+ : "=&r"(status)
+ : "r"(v), "r"(setVal)
+ : "cc");
+ } while (status != 0);
}
STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
@@ -79,13 +71,13 @@ STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
UINT32 status;
do {
- __asm__ __volatile__("ldrex %1, [%2]\n"
- "add %1, %1, %3\n"
- "strex %0, %1, [%2]"
- : "=&r"(status), "=&r"(val)
- : "r"(v), "r"(addVal)
- : "cc");
- } while (__builtin_expect(status != 0, 0));
+ asm volatile("ldrex %1, [%2]\n"
+ "add %1, %1, %3\n"
+ "strex %0, %1, [%2]"
+ : "=&r"(status), "=&r"(val)
+ : "r"(v), "r"(addVal)
+ : "cc");
+ } while (status != 0);
return val;
}
@@ -96,13 +88,13 @@ STATIC INLINE INT32 ArchAtomicSub(Atomic *v, INT32 subVal)
UINT32 status;
do {
- __asm__ __volatile__("ldrex %1, [%2]\n"
- "sub %1, %1, %3\n"
- "strex %0, %1, [%2]"
- : "=&r"(status), "=&r"(val)
- : "r"(v), "r"(subVal)
- : "cc");
- } while (__builtin_expect(status != 0, 0));
+ asm volatile("ldrex %1, [%2]\n"
+ "sub %1, %1, %3\n"
+ "strex %0, %1, [%2]"
+ : "=&r"(status), "=&r"(val)
+ : "r"(v), "r"(subVal)
+ : "cc");
+ } while (status != 0);
return val;
}
@@ -151,12 +143,12 @@ STATIC INLINE INT32 ArchAtomicXchg32bits(volatile INT32 *v, INT32 val)
UINT32 status = 0;
do {
- __asm__ __volatile__("ldrex %0, [%3]\n"
- "strex %1, %4, [%3]"
- : "=&r"(prevVal), "=&r"(status), "+m"(*v)
- : "r"(v), "r"(val)
- : "cc");
- } while (__builtin_expect(status != 0, 0));
+ asm volatile("ldrex %0, [%2]\n"
+ "strex %1, %3, [%2]"
+ : "=&r"(prevVal), "=&r"(status)
+ : "r"(v), "r"(val)
+ : "cc");
+ } while (status != 0);
return prevVal;
}
@@ -186,16 +178,16 @@ STATIC INLINE BOOL ArchAtomicCmpXchg32bits(volatile INT32 *v, INT32 val, INT32 o
UINT32 status = 0;
do {
- __asm__ __volatile__("1: ldrex %0, %2\n"
- " mov %1, #0\n"
- " cmp %0, %3\n"
- " bne 2f\n"
- " strex %1, %4, %2\n"
- "2:"
- : "=&r"(prevVal), "=&r"(status), "+Q"(*v)
- : "r"(oldVal), "r"(val)
- : "cc");
- } while (__builtin_expect(status != 0, 0));
+ asm volatile("ldrex %0, [%2]\n"
+ "mov %1, #0\n"
+ "cmp %0, %3\n"
+ "bne 1f\n"
+ "strex %1, %4, [%2]\n"
+ "1:"
+ : "=&r"(prevVal), "=&r"(status)
+ : "r"(v), "r"(oldVal), "r"(val)
+ : "cc");
+ } while (status != 0);
return prevVal != oldVal;
}
diff --git a/arch/arm/cortex-m4/gcc/los_arch_atomic.h b/arch/arm/cortex-m4/gcc/los_arch_atomic.h
index d5112ab6..d4582cbd 100644
--- a/arch/arm/cortex-m4/gcc/los_arch_atomic.h
+++ b/arch/arm/cortex-m4/gcc/los_arch_atomic.h
@@ -43,15 +43,11 @@ extern "C" {
STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
{
INT32 val;
- UINT32 intSave;
-
- intSave = LOS_IntLock();
- __asm__ __volatile__("ldrex %0, [%1]\n"
+ __asm__ __volatile__("ldrex %0, [%1]\n"
: "=&r"(val)
: "r"(v)
: "cc");
- LOS_IntRestore(intSave);
return val;
}
@@ -59,18 +55,14 @@ STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
STATIC INLINE VOID ArchAtomicSet(Atomic *v, INT32 setVal)
{
UINT32 status;
- UINT32 intSave;
-
- intSave = LOS_IntLock();
- __asm__ __volatile__("1:ldrex %0, [%1]\n"
- " strex %0, %2, [%1]\n"
- " teq %0, #0\n"
- " bne 1b"
- : "=&r"(status)
- : "r"(v), "r"(setVal)
- : "cc");
- LOS_IntRestore(intSave);
+ do {
+ __asm__ __volatile__("ldrex %0, [%1]\n"
+ "strex %0, %2, [%1]\n"
+ : "=&r"(status)
+ : "r"(v), "r"(setVal)
+ : "cc");
+ } while (__builtin_expect(status != 0, 0));
}
STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
@@ -79,9 +71,9 @@ STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
UINT32 status;
do {
- __asm__ __volatile__("ldrex %1, [%2]\n"
- "add %1, %1, %3\n"
- "strex %0, %1, [%2]"
+ __asm__ __volatile__("ldrex %1, [%2]\n"
+ "add %1, %1, %3\n"
+ "strex %0, %1, [%2]"
: "=&r"(status), "=&r"(val)
: "r"(v), "r"(addVal)
: "cc");
@@ -96,9 +88,9 @@ STATIC INLINE INT32 ArchAtomicSub(Atomic *v, INT32 subVal)
UINT32 status;
do {
- __asm__ __volatile__("ldrex %1, [%2]\n"
- "sub %1, %1, %3\n"
- "strex %0, %1, [%2]"
+ __asm__ __volatile__("ldrex %1, [%2]\n"
+ "sub %1, %1, %3\n"
+ "strex %0, %1, [%2]"
: "=&r"(status), "=&r"(val)
: "r"(v), "r"(subVal)
: "cc");
@@ -151,8 +143,8 @@ STATIC INLINE INT32 ArchAtomicXchg32bits(volatile INT32 *v, INT32 val)
UINT32 status = 0;
do {
- __asm__ __volatile__("ldrex %0, [%3]\n"
- "strex %1, %4, [%3]"
+ __asm__ __volatile__("ldrex %0, [%3]\n"
+ "strex %1, %4, [%3]"
: "=&r"(prevVal), "=&r"(status), "+m"(*v)
: "r"(v), "r"(val)
: "cc");
@@ -186,12 +178,12 @@ STATIC INLINE BOOL ArchAtomicCmpXchg32bits(volatile INT32 *v, INT32 val, INT32 o
UINT32 status = 0;
do {
- __asm__ __volatile__("1: ldrex %0, %2\n"
- " mov %1, #0\n"
- " cmp %0, %3\n"
- " bne 2f\n"
- " strex %1, %4, %2\n"
- "2:"
+ __asm__ __volatile__("ldrex %0, %2\n"
+ "mov %1, #0\n"
+ "cmp %0, %3\n"
+ "bne 1f\n"
+ "strex %1, %4, %2\n"
+ "1:"
: "=&r"(prevVal), "=&r"(status), "+Q"(*v)
: "r"(oldVal), "r"(val)
: "cc");
diff --git a/arch/arm/cortex-m4/iar/los_arch_atomic.h b/arch/arm/cortex-m4/iar/los_arch_atomic.h
index d5112ab6..d1d8be8d 100644
--- a/arch/arm/cortex-m4/iar/los_arch_atomic.h
+++ b/arch/arm/cortex-m4/iar/los_arch_atomic.h
@@ -43,15 +43,11 @@ extern "C" {
STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
{
INT32 val;
- UINT32 intSave;
-
- intSave = LOS_IntLock();
- __asm__ __volatile__("ldrex %0, [%1]\n"
- : "=&r"(val)
- : "r"(v)
- : "cc");
- LOS_IntRestore(intSave);
+ asm volatile("ldrex %0, [%1]\n"
+ : "=&r"(val)
+ : "r"(v)
+ : "cc");
return val;
}
@@ -59,18 +55,14 @@ STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
STATIC INLINE VOID ArchAtomicSet(Atomic *v, INT32 setVal)
{
UINT32 status;
- UINT32 intSave;
-
- intSave = LOS_IntLock();
- __asm__ __volatile__("1:ldrex %0, [%1]\n"
- " strex %0, %2, [%1]\n"
- " teq %0, #0\n"
- " bne 1b"
- : "=&r"(status)
- : "r"(v), "r"(setVal)
- : "cc");
- LOS_IntRestore(intSave);
+ asm volatile("1:ldrex %0, [%1]\n"
+ " strex %0, %2, [%1]\n"
+ " teq %0, #0\n"
+ " bne 1b"
+ : "=&r"(status)
+ : "r"(v), "r"(setVal)
+ : "cc");
}
STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
@@ -79,13 +71,13 @@ STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
UINT32 status;
do {
- __asm__ __volatile__("ldrex %1, [%2]\n"
- "add %1, %1, %3\n"
- "strex %0, %1, [%2]"
- : "=&r"(status), "=&r"(val)
- : "r"(v), "r"(addVal)
- : "cc");
- } while (__builtin_expect(status != 0, 0));
+ asm volatile("ldrex %1, [%2]\n"
+ "add %1, %1, %3\n"
+ "strex %0, %1, [%2]"
+ : "=&r"(status), "=&r"(val)
+ : "r"(v), "r"(addVal)
+ : "cc");
+ } while (status != 0);
return val;
}
@@ -96,13 +88,13 @@ STATIC INLINE INT32 ArchAtomicSub(Atomic *v, INT32 subVal)
UINT32 status;
do {
- __asm__ __volatile__("ldrex %1, [%2]\n"
- "sub %1, %1, %3\n"
- "strex %0, %1, [%2]"
- : "=&r"(status), "=&r"(val)
- : "r"(v), "r"(subVal)
- : "cc");
- } while (__builtin_expect(status != 0, 0));
+ asm volatile("ldrex %1, [%2]\n"
+ "sub %1, %1, %3\n"
+ "strex %0, %1, [%2]"
+ : "=&r"(status), "=&r"(val)
+ : "r"(v), "r"(subVal)
+ : "cc");
+ } while (status != 0);
return val;
}
@@ -151,12 +143,12 @@ STATIC INLINE INT32 ArchAtomicXchg32bits(volatile INT32 *v, INT32 val)
UINT32 status = 0;
do {
- __asm__ __volatile__("ldrex %0, [%3]\n"
- "strex %1, %4, [%3]"
- : "=&r"(prevVal), "=&r"(status), "+m"(*v)
- : "r"(v), "r"(val)
- : "cc");
- } while (__builtin_expect(status != 0, 0));
+ asm volatile("ldrex %0, [%2]\n"
+ "strex %1, %3, [%2]"
+ : "=&r"(prevVal), "=&r"(status)
+ : "r"(v), "r"(val)
+ : "cc");
+ } while (status != 0);
return prevVal;
}
@@ -186,16 +178,16 @@ STATIC INLINE BOOL ArchAtomicCmpXchg32bits(volatile INT32 *v, INT32 val, INT32 o
UINT32 status = 0;
do {
- __asm__ __volatile__("1: ldrex %0, %2\n"
- " mov %1, #0\n"
- " cmp %0, %3\n"
- " bne 2f\n"
- " strex %1, %4, %2\n"
- "2:"
- : "=&r"(prevVal), "=&r"(status), "+Q"(*v)
- : "r"(oldVal), "r"(val)
- : "cc");
- } while (__builtin_expect(status != 0, 0));
+ asm volatile("ldrex %0, [%2]\n"
+ "mov %1, #0\n"
+ "cmp %0, %3\n"
+ "bne 1f\n"
+ "strex %1, %4, [%2]\n"
+ "1:"
+ : "=&r"(prevVal), "=&r"(status)
+ : "r"(v), "r"(oldVal), "r"(val)
+ : "cc");
+ } while (status != 0);
return prevVal != oldVal;
}
diff --git a/arch/arm/cortex-m7/gcc/los_arch_atomic.h b/arch/arm/cortex-m7/gcc/los_arch_atomic.h
index d5112ab6..d4582cbd 100644
--- a/arch/arm/cortex-m7/gcc/los_arch_atomic.h
+++ b/arch/arm/cortex-m7/gcc/los_arch_atomic.h
@@ -43,15 +43,11 @@ extern "C" {
STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
{
INT32 val;
- UINT32 intSave;
-
- intSave = LOS_IntLock();
- __asm__ __volatile__("ldrex %0, [%1]\n"
+ __asm__ __volatile__("ldrex %0, [%1]\n"
: "=&r"(val)
: "r"(v)
: "cc");
- LOS_IntRestore(intSave);
return val;
}
@@ -59,18 +55,14 @@ STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
STATIC INLINE VOID ArchAtomicSet(Atomic *v, INT32 setVal)
{
UINT32 status;
- UINT32 intSave;
-
- intSave = LOS_IntLock();
- __asm__ __volatile__("1:ldrex %0, [%1]\n"
- " strex %0, %2, [%1]\n"
- " teq %0, #0\n"
- " bne 1b"
- : "=&r"(status)
- : "r"(v), "r"(setVal)
- : "cc");
- LOS_IntRestore(intSave);
+ do {
+ __asm__ __volatile__("ldrex %0, [%1]\n"
+ "strex %0, %2, [%1]\n"
+ : "=&r"(status)
+ : "r"(v), "r"(setVal)
+ : "cc");
+ } while (__builtin_expect(status != 0, 0));
}
STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
@@ -79,9 +71,9 @@ STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
UINT32 status;
do {
- __asm__ __volatile__("ldrex %1, [%2]\n"
- "add %1, %1, %3\n"
- "strex %0, %1, [%2]"
+ __asm__ __volatile__("ldrex %1, [%2]\n"
+ "add %1, %1, %3\n"
+ "strex %0, %1, [%2]"
: "=&r"(status), "=&r"(val)
: "r"(v), "r"(addVal)
: "cc");
@@ -96,9 +88,9 @@ STATIC INLINE INT32 ArchAtomicSub(Atomic *v, INT32 subVal)
UINT32 status;
do {
- __asm__ __volatile__("ldrex %1, [%2]\n"
- "sub %1, %1, %3\n"
- "strex %0, %1, [%2]"
+ __asm__ __volatile__("ldrex %1, [%2]\n"
+ "sub %1, %1, %3\n"
+ "strex %0, %1, [%2]"
: "=&r"(status), "=&r"(val)
: "r"(v), "r"(subVal)
: "cc");
@@ -151,8 +143,8 @@ STATIC INLINE INT32 ArchAtomicXchg32bits(volatile INT32 *v, INT32 val)
UINT32 status = 0;
do {
- __asm__ __volatile__("ldrex %0, [%3]\n"
- "strex %1, %4, [%3]"
+ __asm__ __volatile__("ldrex %0, [%3]\n"
+ "strex %1, %4, [%3]"
: "=&r"(prevVal), "=&r"(status), "+m"(*v)
: "r"(v), "r"(val)
: "cc");
@@ -186,12 +178,12 @@ STATIC INLINE BOOL ArchAtomicCmpXchg32bits(volatile INT32 *v, INT32 val, INT32 o
UINT32 status = 0;
do {
- __asm__ __volatile__("1: ldrex %0, %2\n"
- " mov %1, #0\n"
- " cmp %0, %3\n"
- " bne 2f\n"
- " strex %1, %4, %2\n"
- "2:"
+ __asm__ __volatile__("ldrex %0, %2\n"
+ "mov %1, #0\n"
+ "cmp %0, %3\n"
+ "bne 1f\n"
+ "strex %1, %4, %2\n"
+ "1:"
: "=&r"(prevVal), "=&r"(status), "+Q"(*v)
: "r"(oldVal), "r"(val)
: "cc");
diff --git a/arch/arm/cortex-m7/iar/los_arch_atomic.h b/arch/arm/cortex-m7/iar/los_arch_atomic.h
index d5112ab6..68e34352 100644
--- a/arch/arm/cortex-m7/iar/los_arch_atomic.h
+++ b/arch/arm/cortex-m7/iar/los_arch_atomic.h
@@ -43,15 +43,11 @@ extern "C" {
STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
{
INT32 val;
- UINT32 intSave;
- intSave = LOS_IntLock();
-
- __asm__ __volatile__("ldrex %0, [%1]\n"
- : "=&r"(val)
- : "r"(v)
- : "cc");
- LOS_IntRestore(intSave);
+ asm volatile("ldrex %0, [%1]\n"
+ : "=&r"(val)
+ : "r"(v)
+ : "cc");
return val;
}
@@ -59,18 +55,14 @@ STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
STATIC INLINE VOID ArchAtomicSet(Atomic *v, INT32 setVal)
{
UINT32 status;
- UINT32 intSave;
- intSave = LOS_IntLock();
-
- __asm__ __volatile__("1:ldrex %0, [%1]\n"
- " strex %0, %2, [%1]\n"
- " teq %0, #0\n"
- " bne 1b"
- : "=&r"(status)
- : "r"(v), "r"(setVal)
- : "cc");
- LOS_IntRestore(intSave);
+ do {
+ asm volatile("ldrex %0, [%1]\n"
+ "strex %0, %2, [%1]\n"
+ : "=&r"(status)
+ : "r"(v), "r"(setVal)
+ : "cc");
+ } while (status != 0);
}
STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
@@ -79,13 +71,13 @@ STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
UINT32 status;
do {
- __asm__ __volatile__("ldrex %1, [%2]\n"
- "add %1, %1, %3\n"
- "strex %0, %1, [%2]"
- : "=&r"(status), "=&r"(val)
- : "r"(v), "r"(addVal)
- : "cc");
- } while (__builtin_expect(status != 0, 0));
+ asm volatile("ldrex %1, [%2]\n"
+ "add %1, %1, %3\n"
+ "strex %0, %1, [%2]"
+ : "=&r"(status), "=&r"(val)
+ : "r"(v), "r"(addVal)
+ : "cc");
+ } while (status != 0);
return val;
}
@@ -96,13 +88,13 @@ STATIC INLINE INT32 ArchAtomicSub(Atomic *v, INT32 subVal)
UINT32 status;
do {
- __asm__ __volatile__("ldrex %1, [%2]\n"
- "sub %1, %1, %3\n"
- "strex %0, %1, [%2]"
- : "=&r"(status), "=&r"(val)
- : "r"(v), "r"(subVal)
- : "cc");
- } while (__builtin_expect(status != 0, 0));
+ asm volatile("ldrex %1, [%2]\n"
+ "sub %1, %1, %3\n"
+ "strex %0, %1, [%2]"
+ : "=&r"(status), "=&r"(val)
+ : "r"(v), "r"(subVal)
+ : "cc");
+ } while (status != 0);
return val;
}
@@ -151,12 +143,12 @@ STATIC INLINE INT32 ArchAtomicXchg32bits(volatile INT32 *v, INT32 val)
UINT32 status = 0;
do {
- __asm__ __volatile__("ldrex %0, [%3]\n"
- "strex %1, %4, [%3]"
- : "=&r"(prevVal), "=&r"(status), "+m"(*v)
- : "r"(v), "r"(val)
- : "cc");
- } while (__builtin_expect(status != 0, 0));
+ asm volatile("ldrex %0, [%2]\n"
+ "strex %1, %3, [%2]"
+ : "=&r"(prevVal), "=&r"(status)
+ : "r"(v), "r"(val)
+ : "cc");
+ } while (status != 0);
return prevVal;
}
@@ -186,16 +178,16 @@ STATIC INLINE BOOL ArchAtomicCmpXchg32bits(volatile INT32 *v, INT32 val, INT32 o
UINT32 status = 0;
do {
- __asm__ __volatile__("1: ldrex %0, %2\n"
- " mov %1, #0\n"
- " cmp %0, %3\n"
- " bne 2f\n"
- " strex %1, %4, %2\n"
- "2:"
- : "=&r"(prevVal), "=&r"(status), "+Q"(*v)
- : "r"(oldVal), "r"(val)
- : "cc");
- } while (__builtin_expect(status != 0, 0));
+ asm volatile("ldrex %0, [%2]\n"
+ "mov %1, #0\n"
+ "cmp %0, %3\n"
+ "bne 1f\n"
+ "strex %1, %4, [%2]\n"
+ "1:"
+ : "=&r"(prevVal), "=&r"(status)
+ : "r"(v), "r"(oldVal), "r"(val)
+ : "cc");
+ } while (status != 0);
return prevVal != oldVal;
}
diff --git a/arch/csky/v2/gcc/los_arch_atomic.h b/arch/csky/v2/gcc/los_arch_atomic.h
index 4b1bbccb..75d08557 100644
--- a/arch/csky/v2/gcc/los_arch_atomic.h
+++ b/arch/csky/v2/gcc/los_arch_atomic.h
@@ -183,11 +183,11 @@ STATIC INLINE BOOL ArchAtomicCmpXchg32bits(volatile INT32 *v, INT32 val, INT32 o
intSave = LOS_IntLock();
- __asm__ __volatile__("1: ldw %0, (%1)\n"
- " cmpne %0, %2\n"
- " bt 2f\n"
- " stw %3, (%1)\n"
- "2:"
+ __asm__ __volatile__("ldw %0, (%1)\n"
+ "cmpne %0, %2\n"
+ "bt 1f\n"
+ "stw %3, (%1)\n"
+ "1:"
: "=&r"(prevVal)
: "r"(v), "r"(oldVal), "r"(val)
: "cc");
diff --git a/arch/risc-v/nuclei/gcc/los_arch_atomic.h b/arch/risc-v/nuclei/gcc/los_arch_atomic.h
index 56babd44..5f409a20 100644
--- a/arch/risc-v/nuclei/gcc/los_arch_atomic.h
+++ b/arch/risc-v/nuclei/gcc/los_arch_atomic.h
@@ -187,10 +187,10 @@ STATIC INLINE BOOL ArchAtomicCmpXchg32bits(volatile INT32 *v, INT32 val, INT32 o
UINT32 intSave;
intSave = LOS_IntLock();
- __asm__ __volatile__("1: lw %0, 0(%1)\n"
- " bne %0, %2, 2f\n"
- " amoswap.w %0, %3, (%1)\n"
- "2:"
+ __asm__ __volatile__("lw %0, 0(%1)\n"
+ "bne %0, %2, 1f\n"
+ "amoswap.w %0, %3, (%1)\n"
+ "1:"
: "=&r"(prevVal)
: "r"(v), "r"(oldVal), "r"(val)
: "memory");
diff --git a/arch/risc-v/riscv32/gcc/los_arch_atomic.h b/arch/risc-v/riscv32/gcc/los_arch_atomic.h
index 56babd44..5f409a20 100644
--- a/arch/risc-v/riscv32/gcc/los_arch_atomic.h
+++ b/arch/risc-v/riscv32/gcc/los_arch_atomic.h
@@ -187,10 +187,10 @@ STATIC INLINE BOOL ArchAtomicCmpXchg32bits(volatile INT32 *v, INT32 val, INT32 o
UINT32 intSave;
intSave = LOS_IntLock();
- __asm__ __volatile__("1: lw %0, 0(%1)\n"
- " bne %0, %2, 2f\n"
- " amoswap.w %0, %3, (%1)\n"
- "2:"
+ __asm__ __volatile__("lw %0, 0(%1)\n"
+ "bne %0, %2, 1f\n"
+ "amoswap.w %0, %3, (%1)\n"
+ "1:"
: "=&r"(prevVal)
: "r"(v), "r"(oldVal), "r"(val)
: "memory");
diff --git a/arch/xtensa/lx6/gcc/los_arch_atomic.h b/arch/xtensa/lx6/gcc/los_arch_atomic.h
index af10f55c..4c16ae5d 100644
--- a/arch/xtensa/lx6/gcc/los_arch_atomic.h
+++ b/arch/xtensa/lx6/gcc/los_arch_atomic.h
@@ -43,15 +43,11 @@ extern "C" {
STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
{
INT32 val;
- UINT32 intSave;
-
- intSave = LOS_IntLock();
__asm__ __volatile__("l32ai %0, %1, 0\n"
: "=&a"(val)
: "a"(v)
: "memory");
- LOS_IntRestore(intSave);
return val;
}
@@ -59,34 +55,30 @@ STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
STATIC INLINE VOID ArchAtomicSet(Atomic *v, INT32 setVal)
{
INT32 val;
- UINT32 intSave;
- intSave = LOS_IntLock();
-
- __asm__ __volatile__("l32ai %0, %2, 0\n"
- "wsr %0, SCOMPARE1\n"
- "s32c1i %3, %1"
+ /* An aligned 32-bit store is atomic on Xtensa and s32ri has release
+  * semantics, so no SCOMPARE1 retry loop is needed for a blind set. */
+ __asm__ __volatile__("s32ri %3, %2, 0\n"
+ "mov %0, %3"
+ : "=&a"(val), "+m"(*v)
+ : "a"(v), "a"(setVal)
+ : "memory");
- LOS_IntRestore(intSave);
}
STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
{
INT32 val;
- UINT32 intSave;
-
- intSave = LOS_IntLock();
-
- __asm__ __volatile__("l32ai %0, %2, 0\n"
- "wsr %0, SCOMPARE1\n"
- "add %0, %0, %3\n"
- "s32c1i %0, %1\n"
- : "=&a"(val), "+m"(*v)
+ INT32 tmp;
+
+ __asm__ __volatile__("1:l32ai %0, %3, 0\n"
+ " wsr %0, SCOMPARE1\n"
+ " mov %1, %0\n"
+ " add %0, %0, %4\n"
+ " s32c1i %0, %2\n"
+ " bne %0, %1, 1b"
+ : "=&a"(val), "=&a"(tmp), "+m"(*v)
: "a"(v), "a"(addVal)
: "memory");
- LOS_IntRestore(intSave);
return *v;
}
@@ -94,18 +86,17 @@ STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
STATIC INLINE INT32 ArchAtomicSub(Atomic *v, INT32 subVal)
{
INT32 val;
- UINT32 intSave;
-
- intSave = LOS_IntLock();
-
- __asm__ __volatile__("l32ai %0, %2, 0\n"
- "wsr %0, SCOMPARE1\n"
- "sub %0, %0, %3\n"
- "s32c1i %0, %1\n"
- : "=&a"(val), "+m"(*v)
+ INT32 tmp;
+
+ __asm__ __volatile__("1:l32ai %0, %3, 0\n"
+ " wsr %0, SCOMPARE1\n"
+ " mov %1, %0\n"
+ " sub %0, %0, %4\n"
+ " s32c1i %0, %2\n"
+ " bne %0, %1, 1b"
+ : "=&a"(val), "=&a"(tmp), "+m"(*v)
: "a"(v), "a"(subVal)
: "memory");
- LOS_IntRestore(intSave);
return *v;
}
@@ -150,17 +141,16 @@ STATIC INLINE INT32 ArchAtomicDecRet(Atomic *v)
STATIC INLINE INT32 ArchAtomicXchg32bits(volatile INT32 *v, INT32 val)
{
INT32 prevVal = 0;
- UINT32 intSave;
-
- intSave = LOS_IntLock();
-
- __asm__ __volatile__("l32ai %0, %2, 0\n"
- "wsr %0, SCOMPARE1\n"
- "s32c1i %3, %1\n"
- : "=&a"(prevVal), "+m"(*v)
+ INT32 tmp;
+
+ __asm__ __volatile__("1:l32ai %0, %3, 0\n"
+ " wsr %0, SCOMPARE1\n"
+ " mov %1, %4\n"
+ " s32c1i %1, %2\n"
+ " bne %1, %0, 1b"
+ : "=&a"(prevVal), "=&a"(tmp), "+m"(*v)
+ : "a"(v), "a"(val)
+ : "memory");
- LOS_IntRestore(intSave);
return prevVal;
}
@@ -187,19 +177,15 @@ STATIC INLINE INT32 ArchAtomicXchg32bits(volatile INT32 *v, INT32 val)
STATIC INLINE BOOL ArchAtomicCmpXchg32bits(volatile INT32 *v, INT32 val, INT32 oldVal)
{
INT32 prevVal = 0;
- UINT32 intSave;
-
- intSave = LOS_IntLock();
__asm__ __volatile__("l32ai %0, %2, 0\n"
"wsr %0, SCOMPARE1\n"
- "bne %0, %3, 2f\n"
+ "bne %0, %3, 1f\n"
"s32c1i %4, %1\n"
- "2:\n"
+ "mov %0, %4\n1:"
: "=&a"(prevVal), "+m"(*v)
: "a"(v), "a"(oldVal), "a"(val)
: "cc");
- LOS_IntRestore(intSave);
return prevVal != oldVal;
}
--
GitLab