/*
 * Based on arch/arm/include/asm/cmpxchg.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/bug.h>

#include <asm/barrier.h>

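/*
 * Atomically exchange the value at ptr with x, using an LL/SC
 * (load-exclusive/store-exclusive) retry loop.  The store half uses the
 * release-semantics stlxr variants; the smp_mb() after the loop supplies
 * the remaining ordering, so xchg() is fully ordered.
 */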
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	unsigned long ret, tmp;

	switch (size) {
	case 1:
		asm volatile("//	__xchg1\n"
32
		"1:	ldxrb	%w0, %2\n"
33
		"	stlxrb	%w1, %w3, %2\n"
34
		"	cbnz	%w1, 1b\n"
35 36 37
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
			: "r" (x)
			: "cc", "memory");
38 39 40
		break;
	case 2:
		asm volatile("//	__xchg2\n"
41
		"1:	ldxrh	%w0, %2\n"
42
		"	stlxrh	%w1, %w3, %2\n"
43
		"	cbnz	%w1, 1b\n"
44 45 46
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
			: "r" (x)
			: "cc", "memory");
47 48 49
		break;
	case 4:
		asm volatile("//	__xchg4\n"
50
		"1:	ldxr	%w0, %2\n"
51
		"	stlxr	%w1, %w3, %2\n"
52
		"	cbnz	%w1, 1b\n"
53 54 55
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
			: "r" (x)
			: "cc", "memory");
56 57 58
		break;
	case 8:
		asm volatile("//	__xchg8\n"
59
		"1:	ldxr	%0, %2\n"
60
		"	stlxr	%w1, %3, %2\n"
61
		"	cbnz	%w1, 1b\n"
62 63 64
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
			: "r" (x)
			: "cc", "memory");
65 66 67 68 69
		break;
	default:
		BUILD_BUG();
	}

	smp_mb();
	return ret;
}

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
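
/*
 * Illustrative sketch only ('flag' is a hypothetical caller-side
 * variable, not part of this header):
 *
 *	unsigned long flag = 0;
 *	unsigned long old = xchg(&flag, 1UL);	// flag is now 1, old holds 0
 */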

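/*
 * Atomically compare the value at ptr with old and, if they match, store
 * new.  Returns the value that was read.  The "mov %w0, #0" pre-clears
 * the status flag so that a compare failure falls through the "1:" label
 * and exits the retry loop; only a failed store-exclusive loops again.
 * This variant provides no implicit memory barriers.
 */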
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long oldval = 0, res;

	switch (size) {
	case 1:
		do {
			asm volatile("// __cmpxchg1\n"
86
			"	ldxrb	%w1, %2\n"
87 88 89
			"	mov	%w0, #0\n"
			"	cmp	%w1, %w3\n"
			"	b.ne	1f\n"
90
			"	stxrb	%w0, %w4, %2\n"
91
			"1:\n"
92 93
				: "=&r" (res), "=&r" (oldval), "+Q" (*(u8 *)ptr)
				: "Ir" (old), "r" (new)
94 95 96 97 98 99 100
				: "cc");
		} while (res);
		break;

	case 2:
		do {
			asm volatile("// __cmpxchg2\n"
101
			"	ldxrh	%w1, %2\n"
102 103 104
			"	mov	%w0, #0\n"
			"	cmp	%w1, %w3\n"
			"	b.ne	1f\n"
105
			"	stxrh	%w0, %w4, %2\n"
106
			"1:\n"
107 108 109
				: "=&r" (res), "=&r" (oldval), "+Q" (*(u16 *)ptr)
				: "Ir" (old), "r" (new)
				: "cc");
110 111 112 113 114 115
		} while (res);
		break;

	case 4:
		do {
			asm volatile("// __cmpxchg4\n"
116
			"	ldxr	%w1, %2\n"
117 118 119
			"	mov	%w0, #0\n"
			"	cmp	%w1, %w3\n"
			"	b.ne	1f\n"
120
			"	stxr	%w0, %w4, %2\n"
121
			"1:\n"
122 123
				: "=&r" (res), "=&r" (oldval), "+Q" (*(u32 *)ptr)
				: "Ir" (old), "r" (new)
124 125 126 127 128 129 130
				: "cc");
		} while (res);
		break;

	case 8:
		do {
			asm volatile("// __cmpxchg8\n"
131
			"	ldxr	%1, %2\n"
132 133 134
			"	mov	%w0, #0\n"
			"	cmp	%1, %3\n"
			"	b.ne	1f\n"
135
			"	stxr	%w0, %4, %2\n"
136
			"1:\n"
137 138
				: "=&r" (res), "=&r" (oldval), "+Q" (*(u64 *)ptr)
				: "Ir" (old), "r" (new)
139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161
				: "cc");
		} while (res);
		break;

	default:
		BUILD_BUG();
	}

	return oldval;
}

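/*
 * Fully-ordered variant: a full barrier on each side of the unordered
 * __cmpxchg() gives cmpxchg() its documented full-barrier semantics.
 */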
static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
					 unsigned long new, int size)
{
	unsigned long ret;

	smp_mb();
	ret = __cmpxchg(ptr, old, new, size);
	smp_mb();

	return ret;
}

#define cmpxchg(ptr, o, n) \
({ \
	__typeof__(*(ptr)) __ret; \
	__ret = (__typeof__(*(ptr))) \
		__cmpxchg_mb((ptr), (unsigned long)(o), (unsigned long)(n), \
			     sizeof(*(ptr))); \
	__ret; \
})
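
/*
 * Illustrative sketch only ('lock' is a hypothetical variable):
 *
 *	int lock = 0;
 *	if (cmpxchg(&lock, 0, 1) == 0) {
 *		// we observed 0 and atomically replaced it with 1
 *	}
 */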

#define cmpxchg_local(ptr, o, n) \
({ \
	__typeof__(*(ptr)) __ret; \
	__ret = (__typeof__(*(ptr))) \
		__cmpxchg((ptr), (unsigned long)(o), \
			  (unsigned long)(n), sizeof(*(ptr))); \
	__ret; \
})
#define cmpxchg64(ptr,o,n)		cmpxchg((ptr),(o),(n))
#define cmpxchg64_local(ptr,o,n)	cmpxchg_local((ptr),(o),(n))

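/*
 * The relaxed variant needs no ordering guarantees, so it can reuse the
 * barrier-free cmpxchg_local() implementation.
 */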
#define cmpxchg64_relaxed(ptr,o,n)	cmpxchg_local((ptr),(o),(n))

#endif	/* __ASM_CMPXCHG_H */