Commit 219b1e4c authored by Max Filippov, committed by Chris Zankel

xtensa: add s32c1i-based atomic ops implementations

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Chris Zankel <chris@zankel.net>
Parent 00273125
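
The new fast paths in this diff all follow the same compare-and-swap retry pattern: load the current word, copy it into the SCOMPARE1 special register with wsr, compute the new value, then issue s32c1i, which stores the new value only if the word in memory still equals SCOMPARE1 and always returns the value it actually observed; if that differs from the loaded value, something else modified the word and the loop retries. In the _return variants a trailing add/sub recomputes the result, because a successful s32c1i leaves the old memory value in the destination register. The sketch below is illustrative only and not part of the patch; it expresses that loop in plain C, using GCC's __sync_val_compare_and_swap builtin as a stand-in for the raw wsr scompare1 + s32c1i pair.

/* Illustrative sketch only, not part of this commit. */
static inline void atomic_add_sketch(int i, volatile int *counter)
{
	int old, seen;

	do {
		old = *counter;                        /* l32i  %1, %3, 0       */
		seen = __sync_val_compare_and_swap(counter, old, old + i);
		                                       /* wsr scompare1; s32c1i */
	} while (seen != old);                         /* bne %0, %1, 1b        */
}

When XCHAL_HAVE_S32C1I is not configured, each function keeps the previous fallback, which masks interrupts with rsil/wsr ps around a plain load/modify/store sequence.
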
arch/xtensa/include/asm/atomic.h
@@ -66,19 +66,35 @@
*/
static inline void atomic_add(int i, atomic_t * v)
{
unsigned int vval;
__asm__ __volatile__(
"rsil a15, "__stringify(LOCKLEVEL)"\n\t"
"l32i %0, %2, 0 \n\t"
"add %0, %0, %1 \n\t"
"s32i %0, %2, 0 \n\t"
"wsr a15, ps \n\t"
"rsync \n"
: "=&a" (vval)
: "a" (i), "a" (v)
: "a15", "memory"
);
#if XCHAL_HAVE_S32C1I
unsigned long tmp;
int result;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" add %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (result), "=&a" (tmp)
: "a" (i), "a" (v)
: "memory"
);
#else
unsigned int vval;
__asm__ __volatile__(
" rsil a15, "__stringify(LOCKLEVEL)"\n"
" l32i %0, %2, 0\n"
" add %0, %0, %1\n"
" s32i %0, %2, 0\n"
" wsr a15, ps\n"
" rsync\n"
: "=&a" (vval)
: "a" (i), "a" (v)
: "a15", "memory"
);
#endif
}
/**
@@ -90,19 +106,35 @@ static inline void atomic_add(int i, atomic_t * v)
*/
static inline void atomic_sub(int i, atomic_t *v)
{
unsigned int vval;
__asm__ __volatile__(
"rsil a15, "__stringify(LOCKLEVEL)"\n\t"
"l32i %0, %2, 0 \n\t"
"sub %0, %0, %1 \n\t"
"s32i %0, %2, 0 \n\t"
"wsr a15, ps \n\t"
"rsync \n"
: "=&a" (vval)
: "a" (i), "a" (v)
: "a15", "memory"
);
#if XCHAL_HAVE_S32C1I
unsigned long tmp;
int result;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" sub %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (result), "=&a" (tmp)
: "a" (i), "a" (v)
: "memory"
);
#else
unsigned int vval;
__asm__ __volatile__(
" rsil a15, "__stringify(LOCKLEVEL)"\n"
" l32i %0, %2, 0\n"
" sub %0, %0, %1\n"
" s32i %0, %2, 0\n"
" wsr a15, ps\n"
" rsync\n"
: "=&a" (vval)
: "a" (i), "a" (v)
: "a15", "memory"
);
#endif
}
/*
@@ -111,40 +143,78 @@ static inline void atomic_sub(int i, atomic_t *v)
static inline int atomic_add_return(int i, atomic_t * v)
{
unsigned int vval;
__asm__ __volatile__(
"rsil a15,"__stringify(LOCKLEVEL)"\n\t"
"l32i %0, %2, 0 \n\t"
"add %0, %0, %1 \n\t"
"s32i %0, %2, 0 \n\t"
"wsr a15, ps \n\t"
"rsync \n"
: "=&a" (vval)
: "a" (i), "a" (v)
: "a15", "memory"
);
return vval;
#if XCHAL_HAVE_S32C1I
unsigned long tmp;
int result;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" add %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
" add %0, %0, %2\n"
: "=&a" (result), "=&a" (tmp)
: "a" (i), "a" (v)
: "memory"
);
return result;
#else
unsigned int vval;
__asm__ __volatile__(
" rsil a15,"__stringify(LOCKLEVEL)"\n"
" l32i %0, %2, 0\n"
" add %0, %0, %1\n"
" s32i %0, %2, 0\n"
" wsr a15, ps\n"
" rsync\n"
: "=&a" (vval)
: "a" (i), "a" (v)
: "a15", "memory"
);
return vval;
#endif
}
static inline int atomic_sub_return(int i, atomic_t * v)
{
unsigned int vval;
__asm__ __volatile__(
"rsil a15,"__stringify(LOCKLEVEL)"\n\t"
"l32i %0, %2, 0 \n\t"
"sub %0, %0, %1 \n\t"
"s32i %0, %2, 0 \n\t"
"wsr a15, ps \n\t"
"rsync \n"
: "=&a" (vval)
: "a" (i), "a" (v)
: "a15", "memory"
);
return vval;
#if XCHAL_HAVE_S32C1I
unsigned long tmp;
int result;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" sub %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
" sub %0, %0, %2\n"
: "=&a" (result), "=&a" (tmp)
: "a" (i), "a" (v)
: "memory"
);
return result;
#else
unsigned int vval;
__asm__ __volatile__(
" rsil a15,"__stringify(LOCKLEVEL)"\n"
" l32i %0, %2, 0\n"
" sub %0, %0, %1\n"
" s32i %0, %2, 0\n"
" wsr a15, ps\n"
" rsync\n"
: "=&a" (vval)
: "a" (i), "a" (v)
: "a15", "memory"
);
return vval;
#endif
}
/**
@@ -251,38 +321,70 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
unsigned int all_f = -1;
unsigned int vval;
__asm__ __volatile__(
"rsil a15,"__stringify(LOCKLEVEL)"\n\t"
"l32i %0, %2, 0 \n\t"
"xor %1, %4, %3 \n\t"
"and %0, %0, %4 \n\t"
"s32i %0, %2, 0 \n\t"
"wsr a15, ps \n\t"
"rsync \n"
: "=&a" (vval), "=a" (mask)
: "a" (v), "a" (all_f), "1" (mask)
: "a15", "memory"
);
#if XCHAL_HAVE_S32C1I
unsigned long tmp;
int result;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" and %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (result), "=&a" (tmp)
: "a" (~mask), "a" (v)
: "memory"
);
#else
unsigned int all_f = -1;
unsigned int vval;
__asm__ __volatile__(
" rsil a15,"__stringify(LOCKLEVEL)"\n"
" l32i %0, %2, 0\n"
" xor %1, %4, %3\n"
" and %0, %0, %4\n"
" s32i %0, %2, 0\n"
" wsr a15, ps\n"
" rsync\n"
: "=&a" (vval), "=a" (mask)
: "a" (v), "a" (all_f), "1" (mask)
: "a15", "memory"
);
#endif
}
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
unsigned int vval;
__asm__ __volatile__(
"rsil a15,"__stringify(LOCKLEVEL)"\n\t"
"l32i %0, %2, 0 \n\t"
"or %0, %0, %1 \n\t"
"s32i %0, %2, 0 \n\t"
"wsr a15, ps \n\t"
"rsync \n"
: "=&a" (vval)
: "a" (mask), "a" (v)
: "a15", "memory"
);
#if XCHAL_HAVE_S32C1I
unsigned long tmp;
int result;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" or %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (result), "=&a" (tmp)
: "a" (mask), "a" (v)
: "memory"
);
#else
unsigned int vval;
__asm__ __volatile__(
" rsil a15,"__stringify(LOCKLEVEL)"\n"
" l32i %0, %2, 0\n"
" or %0, %0, %1\n"
" s32i %0, %2, 0\n"
" wsr a15, ps\n"
" rsync\n"
: "=&a" (vval)
: "a" (mask), "a" (v)
: "a15", "memory"
);
#endif
}
/* Atomic operations are already serializing */
@@ -294,4 +396,3 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
#endif /* __KERNEL__ */
#endif /* _XTENSA_ATOMIC_H */
arch/xtensa/include/asm/cmpxchg.h
@@ -22,17 +22,30 @@
static inline unsigned long
__cmpxchg_u32(volatile int *p, int old, int new)
{
__asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t"
"l32i %0, %1, 0 \n\t"
"bne %0, %2, 1f \n\t"
"s32i %3, %1, 0 \n\t"
"1: \n\t"
"wsr a15, ps \n\t"
"rsync \n\t"
: "=&a" (old)
: "a" (p), "a" (old), "r" (new)
: "a15", "memory");
return old;
#if XCHAL_HAVE_S32C1I
__asm__ __volatile__(
" wsr %2, scompare1\n"
" s32c1i %0, %1, 0\n"
: "+a" (new)
: "a" (p), "a" (old)
: "memory"
);
return new;
#else
__asm__ __volatile__(
" rsil a15, "__stringify(LOCKLEVEL)"\n"
" l32i %0, %1, 0\n"
" bne %0, %2, 1f\n"
" s32i %3, %1, 0\n"
"1:\n"
" wsr a15, ps\n"
" rsync\n"
: "=&a" (old)
: "a" (p), "a" (old), "r" (new)
: "a15", "memory");
return old;
#endif
}
/* This function doesn't exist, so you'll get a linker error
* if something tries to do an invalid cmpxchg(). */
@@ -93,16 +106,32 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
{
unsigned long tmp;
__asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t"
"l32i %0, %1, 0 \n\t"
"s32i %2, %1, 0 \n\t"
"wsr a15, ps \n\t"
"rsync \n\t"
: "=&a" (tmp)
: "a" (m), "a" (val)
: "a15", "memory");
return tmp;
#if XCHAL_HAVE_S32C1I
unsigned long tmp, result;
__asm__ __volatile__(
"1: l32i %1, %2, 0\n"
" mov %0, %3\n"
" wsr %1, scompare1\n"
" s32c1i %0, %2, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (result), "=&a" (tmp)
: "a" (m), "a" (val)
: "memory"
);
return result;
#else
unsigned long tmp;
__asm__ __volatile__(
" rsil a15, "__stringify(LOCKLEVEL)"\n"
" l32i %0, %1, 0\n"
" s32i %2, %1, 0\n"
" wsr a15, ps\n"
" rsync\n"
: "=&a" (tmp)
: "a" (m), "a" (val)
: "a15", "memory");
return tmp;
#endif
}
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))