Commit 3226aad8 authored by Michael S. Tsirkin

sh: support 1 and 2 byte xchg

This completes the xchg implementation for the sh architecture.  Note: the
llsc variant is tricky since it only supports 4-byte atomics; the existing
implementation of 1-byte xchg is wrong, as we need to do a 4-byte cmpxchg
and retry if any of the other bytes changed meanwhile.

Write this in C for clarity.
Suggested-by: Rich Felker <dalias@libc.org>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Parent a6596127
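For readers unfamiliar with the trick, here is a minimal standalone C sketch of the "4-byte cmpxchg and retry" idea described above. It is an illustration only, not the patch's code: the helper name xchg_u8_via_cmpxchg32 is invented for this sketch, and the GCC/Clang builtin __atomic_compare_exchange_n stands in for the kernel's 4-byte __cmpxchg_u32.

/*
 * Illustrative sketch only -- not the kernel code.  Emulate a 1-byte xchg
 * with a 4-byte compare-and-swap: read the containing 32-bit word, splice
 * the new byte in, and retry the CAS if any byte of the word changed in
 * the meantime.
 */
#include <stdint.h>

static inline uint8_t xchg_u8_via_cmpxchg32(volatile uint8_t *m, uint8_t val)
{
	uintptr_t addr = (uintptr_t)m;
	volatile uint32_t *p = (volatile uint32_t *)(addr & ~(uintptr_t)3);
	unsigned int off = addr & 3;		/* byte offset within the word */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	unsigned int shift = (3 - off) * 8;
#else
	unsigned int shift = off * 8;
#endif
	uint32_t mask = (uint32_t)0xff << shift;
	uint32_t oldv, newv;

	do {
		oldv = *p;			/* snapshot the whole word */
		newv = (oldv & ~mask) | ((uint32_t)val << shift); /* splice in the new byte */
	} while (!__atomic_compare_exchange_n(p, &oldv, newv, 0,
					      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));

	return (oldv & mask) >> shift;		/* previous value of the byte */
}

The llsc change below implements this same idea in __xchg_cmpxchg(), using READ_ONCE() and __cmpxchg_u32(), and handles both 1- and 2-byte sizes.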
arch/sh/include/asm/cmpxchg-grb.h
@@ -23,6 +23,28 @@ static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
 	return retval;
 }
 
+static inline unsigned long xchg_u16(volatile u16 *m, unsigned long val)
+{
+	unsigned long retval;
+
+	__asm__ __volatile__ (
+		"   .align  2             \n\t"
+		"   mova    1f,   r0      \n\t" /* r0 = end point */
+		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
+		"   mov    #-6,   r15     \n\t" /* LOGIN */
+		"   mov.w  @%1,   %0      \n\t" /* load old value */
+		"   extu.w  %0,   %0      \n\t" /* extend as unsigned */
+		"   mov.w   %2,   @%1     \n\t" /* store new value */
+		"1: mov     r1,   r15     \n\t" /* LOGOUT */
+		: "=&r" (retval),
+		  "+r"  (m),
+		  "+r"  (val)		/* inhibit r15 overloading */
+		:
+		: "memory" , "r0", "r1");
+
+	return retval;
+}
+
 static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
 {
 	unsigned long retval;
...
arch/sh/include/asm/cmpxchg-irq.h
@@ -14,6 +14,17 @@ static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
 	return retval;
 }
 
+static inline unsigned long xchg_u16(volatile u16 *m, unsigned long val)
+{
+	unsigned long flags, retval;
+
+	local_irq_save(flags);
+	retval = *m;
+	*m = val;
+	local_irq_restore(flags);
+	return retval;
+}
+
 static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
 {
 	unsigned long flags, retval;
...
arch/sh/include/asm/cmpxchg-llsc.h
@@ -1,6 +1,9 @@
 #ifndef __ASM_SH_CMPXCHG_LLSC_H
 #define __ASM_SH_CMPXCHG_LLSC_H
 
+#include <linux/bitops.h>
+#include <asm/byteorder.h>
+
 static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
 {
 	unsigned long retval;
@@ -22,29 +25,8 @@ static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
 	return retval;
 }
 
-static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
-{
-	unsigned long retval;
-	unsigned long tmp;
-
-	__asm__ __volatile__ (
-		"1:					\n\t"
-		"movli.l	@%2, %0	! xchg_u8	\n\t"
-		"mov		%0, %1			\n\t"
-		"mov		%3, %0			\n\t"
-		"movco.l	%0, @%2			\n\t"
-		"bf		1b			\n\t"
-		"synco					\n\t"
-		: "=&z"(tmp), "=&r" (retval)
-		: "r" (m), "r" (val & 0xff)
-		: "t", "memory"
-	);
-
-	return retval;
-}
-
 static inline unsigned long
-__cmpxchg_u32(volatile int *m, unsigned long old, unsigned long new)
+__cmpxchg_u32(volatile u32 *m, unsigned long old, unsigned long new)
 {
 	unsigned long retval;
 	unsigned long tmp;
@@ -68,4 +50,36 @@ __cmpxchg_u32(volatile int *m, unsigned long old, unsigned long new)
 	return retval;
 }
 
+static inline u32 __xchg_cmpxchg(volatile void *ptr, u32 x, int size)
+{
+	int off = (unsigned long)ptr % sizeof(u32);
+	volatile u32 *p = ptr - off;
+#ifdef __BIG_ENDIAN
+	int bitoff = (sizeof(u32) - 1 - off) * BITS_PER_BYTE;
+#else
+	int bitoff = off * BITS_PER_BYTE;
+#endif
+	u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff;
+	u32 oldv, newv;
+	u32 ret;
+
+	do {
+		oldv = READ_ONCE(*p);
+		ret = (oldv & bitmask) >> bitoff;
+		newv = (oldv & ~bitmask) | (x << bitoff);
+	} while (__cmpxchg_u32(p, oldv, newv) != oldv);
+
+	return ret;
+}
+
+static inline unsigned long xchg_u16(volatile u16 *m, unsigned long val)
+{
+	return __xchg_cmpxchg(m, val, sizeof *m);
+}
+
+static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
+{
+	return __xchg_cmpxchg(m, val, sizeof *m);
+}
+
 #endif /* __ASM_SH_CMPXCHG_LLSC_H */
arch/sh/include/asm/cmpxchg.h
@@ -27,6 +27,9 @@ extern void __xchg_called_with_bad_pointer(void);
 	case 4:						\
 		__xchg__res = xchg_u32(__xchg_ptr, x);	\
 		break;					\
+	case 2:						\
+		__xchg__res = xchg_u16(__xchg_ptr, x);	\
+		break;					\
 	case 1:						\
 		__xchg__res = xchg_u8(__xchg_ptr, x);	\
 		break;					\
...
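A quick userspace check of the sketch shown above the diff (again illustrative only, assuming that helper is in scope): exchanging one byte must leave the neighbouring bytes of the containing word untouched.

#include <stdio.h>

int main(void)
{
	/* four bytes packed into one naturally aligned 32-bit word */
	volatile uint8_t bytes[4] __attribute__((aligned(4))) = { 1, 2, 3, 4 };

	uint8_t old = xchg_u8_via_cmpxchg32(&bytes[2], 0xaa);

	/* expected output: old=3 new=170 neighbours=1,2,4 */
	printf("old=%d new=%d neighbours=%d,%d,%d\n",
	       old, bytes[2], bytes[0], bytes[1], bytes[3]);
	return 0;
}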