/* SPDX-License-Identifier: GPL-2.0 */
/* 64-bit atomic xchg() and cmpxchg() definitions.
 *
 * Copyright (C) 1996, 1997, 2000 David S. Miller (davem@redhat.com)
 */

#ifndef __ARCH_SPARC64_CMPXCHG__
#define __ARCH_SPARC64_CMPXCHG__

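/*
 * 32-bit compare-and-swap built directly on the CAS instruction: the
 * word at m is compared with old and, if they match, replaced with new.
 * Whatever the word held beforehand is returned via the register that
 * carried new, so the caller can tell success from failure.
 */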
static inline unsigned long
__cmpxchg_u32(volatile int *m, int old, int new)
{
	__asm__ __volatile__("cas [%2], %3, %0"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}

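/*
 * xchg32() and xchg64() implement an unconditional exchange on top of
 * CAS/CASX: load the current value, try to swap it with val, and if
 * another CPU changed the word in the meantime, restore val via the
 * annulled delay slot and retry.
 */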
static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	mov		%0, %1\n"
"1:	lduw		[%4], %2\n"
"	cas		[%4], %2, %0\n"
"	cmp		%2, %0\n"
"	bne,a,pn	%%icc, 1b\n"
"	 mov		%1, %0\n"
	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
	: "0" (val), "r" (m)
	: "cc", "memory");
	return val;
}

static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long val)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	mov		%0, %1\n"
"1:	ldx		[%4], %2\n"
"	casx		[%4], %2, %0\n"
"	cmp		%2, %0\n"
"	bne,a,pn	%%xcc, 1b\n"
"	 mov		%1, %0\n"
	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
	: "0" (val), "r" (m)
	: "cc", "memory");
	return val;
}

#define xchg(ptr,x)							\
({	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__xchg((unsigned long)(x), (ptr), sizeof(*(ptr)));	\
	__ret;								\
})

void __xchg_called_with_bad_pointer(void);

/*
 * Use the 4 byte cas instruction to achieve a 2 byte xchg. The main
 * logic here is to compute the bit shift of the halfword we are
 * interested in; the XOR flips the halfword offset to account for
 * big-endian byte order.
 */
static inline unsigned long
xchg16(__volatile__ unsigned short *m, unsigned short val)
{
	unsigned long maddr = (unsigned long)m;
	int bit_shift = (((unsigned long)m & 2) ^ 2) << 3;
	unsigned int mask = 0xffff << bit_shift;
	unsigned int *ptr = (unsigned int  *) (maddr & ~2);
	unsigned int old32, new32, load32;

	/* Read the old value */
	load32 = *ptr;

	do {
		old32 = load32;
		new32 = (load32 & (~mask)) | val << bit_shift;
		load32 = __cmpxchg_u32(ptr, old32, new32);
	} while (load32 != old32);

	return (load32 & mask) >> bit_shift;
}
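
/*
 * Worked example for xchg16() (illustration only): for a halfword at
 * byte offset 2 within its aligned word, (m & 2) ^ 2 == 0, so bit_shift
 * is 0 and mask is 0x0000ffff -- on big-endian SPARC that halfword
 * occupies the low 16 bits of the containing 32-bit word.  For offset 0
 * the shift is 16 and the mask is 0xffff0000.
 */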

static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
				       int size)
{
	switch (size) {
	case 2:
		return xchg16(ptr, x);
	case 4:
		return xchg32(ptr, x);
	case 8:
		return xchg64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#include <asm-generic/cmpxchg-local.h>


static inline unsigned long
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{
	__asm__ __volatile__("casx [%2], %3, %0"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}

/*
 * Use the 4 byte cas instruction to achieve a 1 byte cmpxchg. The main
 * logic here is to compute the bit shift of the byte we are interested
 * in; the XOR flips the byte offset to account for big-endian byte
 * order.
 */
static inline unsigned long
__cmpxchg_u8(volatile unsigned char *m, unsigned char old, unsigned char new)
{
	unsigned long maddr = (unsigned long)m;
	int bit_shift = (((unsigned long)m & 3) ^ 3) << 3;
	unsigned int mask = 0xff << bit_shift;
	unsigned int *ptr = (unsigned int *) (maddr & ~3);
	unsigned int old32, new32, load;
	unsigned int load32 = *ptr;

	do {
		new32 = (load32 & ~mask) | (new << bit_shift);
		old32 = (load32 & ~mask) | (old << bit_shift);
		load32 = __cmpxchg_u32(ptr, old32, new32);
		if (load32 == old32)
			return old;
		load = (load32 & mask) >> bit_shift;
	} while (load == old);

	return load;
}
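
/*
 * Worked example for __cmpxchg_u8() (illustration only): for a byte at
 * offset 1 within its aligned word, (m & 3) ^ 3 == 2, so bit_shift is 16
 * and mask is 0x00ff0000, the byte's big-endian lane.  The loop above
 * retries only when the CAS failed because a neighbouring byte changed
 * while our byte still equals old; if our byte itself differs, that
 * value is returned and the caller sees the failure.
 */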

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 1:
		return __cmpxchg_u8(ptr, old, new);
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })
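
/*
 * Usage sketch (hypothetical names, illustration only): the classic
 * read-modify-write retry loop, where success is detected by comparing
 * the returned value with the expected old value:
 *
 *	do {
 *		old = READ_ONCE(*p);
 *	} while (cmpxchg(p, old, old + 1) != old);
 */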

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */

static inline unsigned long __cmpxchg_local(volatile void *ptr,
				      unsigned long old,
				      unsigned long new, int size)
{
	switch (size) {
	case 4:
	case 8:	return __cmpxchg(ptr, old, new, size);
	default:
		return __cmpxchg_local_generic(ptr, old, new, size);
	}

	return old;
}

#define cmpxchg_local(ptr, o, n)				  	\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n)					\
  ({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
  })
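
/*
 * Usage sketch (hypothetical names, illustration only): the _local forms
 * only promise atomicity against the current CPU, so they are typically
 * applied to data owned by one CPU, e.g. with preemption disabled:
 *
 *	preempt_disable();
 *	old = pcpu_stat;
 *	cmpxchg_local(&pcpu_stat, old, old + 1);
 *	preempt_enable();
 */
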
#define cmpxchg64(ptr, o, n)	cmpxchg64_local((ptr), (o), (n))

#endif /* __ARCH_SPARC64_CMPXCHG__ */