#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
7

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */


#define ATOMIC_INIT(i)		{ (i) }
#define ATOMIC64_INIT(i)	{ (i) }

#define atomic_read(v)		READ_ONCE((v)->counter)
#define atomic64_read(v)	READ_ONCE((v)->counter)

#define atomic_set(v,i)		WRITE_ONCE((v)->counter, (i))
#define atomic64_set(v,i)	WRITE_ONCE((v)->counter, (i))
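
/*
 * Usage sketch (illustrative only; "nr_users" and do_something() are
 * hypothetical, not defined by this header):
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_users, 1);
 *	if (atomic_read(&nr_users) > 0)
 *		do_something();
 *
 * READ_ONCE()/WRITE_ONCE() make the plain load and store single-copy
 * atomic; they imply no memory barrier.
 */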

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int i, atomic_t * v)			\
{									\
	unsigned long temp;						\
	__asm__ __volatile__(						\
	"1:	ldl_l %0,%1\n"						\
	"	" #asm_op " %0,%2,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter)				\
	:"Ir" (i), "m" (v->counter));					\
}

#define ATOMIC_OP_RETURN(op, asm_op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	long temp, result;						\
	smp_mb();							\
	__asm__ __volatile__(						\
	"1:	ldl_l %0,%1\n"						\
	"	" #asm_op " %0,%3,%2\n"					\
	"	" #asm_op " %0,%3,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_mb();							\
	return result;							\
}

#define ATOMIC_FETCH_OP(op, asm_op)					\
static inline int atomic_fetch_##op(int i, atomic_t *v)			\
{									\
	long temp, result;						\
	smp_mb();							\
	__asm__ __volatile__(						\
	"1:	ldl_l %2,%1\n"						\
	"	" #asm_op " %2,%3,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_mb();							\
	return result;							\
}
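
/*
 * For reference, ATOMIC_OP(add, addl) expands to (roughly) the
 * function below.  ldl_l/stl_c are Alpha's load-locked/
 * store-conditional pair: stl_c writes 0 into its source register if
 * the store lost the reservation, in which case the beq is taken to
 * the out-of-line retry branch in .subsection 2, so the common
 * success path stays straight-line:
 *
 *	static __inline__ void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned long temp;
 *		__asm__ __volatile__(
 *		"1:	ldl_l %0,%1\n"
 *		"	addl %0,%2,%0\n"
 *		"	stl_c %0,%1\n"
 *		"	beq %0,2f\n"
 *		".subsection 2\n"
 *		"2:	br 1b\n"
 *		".previous"
 *		:"=&r" (temp), "=m" (v->counter)
 *		:"Ir" (i), "m" (v->counter));
 *	}
 */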

#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(long i, atomic64_t * v)		\
{									\
	unsigned long temp;						\
	__asm__ __volatile__(						\
	"1:	ldq_l %0,%1\n"						\
	"	" #asm_op " %0,%2,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter)				\
	:"Ir" (i), "m" (v->counter));					\
}

#define ATOMIC64_OP_RETURN(op, asm_op)					\
static __inline__ long atomic64_##op##_return(long i, atomic64_t * v)	\
{									\
	long temp, result;						\
	smp_mb();							\
	__asm__ __volatile__(						\
	"1:	ldq_l %0,%1\n"						\
	"	" #asm_op " %0,%3,%2\n"					\
	"	" #asm_op " %0,%3,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_mb();							\
	return result;							\
}

#define ATOMIC64_FETCH_OP(op, asm_op)					\
static __inline__ long atomic64_fetch_##op(long i, atomic64_t * v)	\
{									\
	long temp, result;						\
	smp_mb();							\
	__asm__ __volatile__(						\
	"1:	ldq_l %2,%1\n"						\
	"	" #asm_op " %2,%3,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_mb();							\
	return result;							\
}

#define ATOMIC_OPS(op)							\
	ATOMIC_OP(op, op##l)						\
	ATOMIC_OP_RETURN(op, op##l)					\
	ATOMIC_FETCH_OP(op, op##l)					\
	ATOMIC64_OP(op, op##q)						\
	ATOMIC64_OP_RETURN(op, op##q)					\
	ATOMIC64_FETCH_OP(op, op##q)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)
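
/*
 * ATOMIC_OPS(add) and ATOMIC_OPS(sub) generate atomic_add(),
 * atomic_add_return(), atomic_fetch_add() and their atomic64_* and
 * "sub" counterparts.  The _return and fetch_ variants are fully
 * ordered (smp_mb() on both sides of the ll/sc sequence).
 * Illustrative values:
 *
 *	atomic_t cnt = ATOMIC_INIT(1);
 *
 *	int old = atomic_fetch_add(2, &cnt);	-> old == 1, cnt == 3
 *	int new = atomic_sub_return(1, &cnt);	-> new == 2, cnt == 2
 */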

#define atomic_andnot atomic_andnot
#define atomic64_andnot atomic64_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm)						\
	ATOMIC_OP(op, asm)						\
	ATOMIC_FETCH_OP(op, asm)					\
	ATOMIC64_OP(op, asm)						\
	ATOMIC64_FETCH_OP(op, asm)

ATOMIC_OPS(and, and)
ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, bis)
ATOMIC_OPS(xor, xor)
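
/*
 * Alpha spells and-not "bic" and or "bis"; atomic_andnot(i, v)
 * atomically performs v->counter &= ~i.  A sketch with illustrative
 * values:
 *
 *	atomic_t flags = ATOMIC_INIT(0x0f);
 *
 *	atomic_andnot(0x03, &flags);	-> counter == 0x0c
 *	atomic_or(0x30, &flags);	-> counter == 0x3c
 */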

#undef ATOMIC_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
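
/*
 * These forward to cmpxchg()/xchg() from <asm/cmpxchg.h>, applied to
 * v->counter.  A typical open-coded update loop (a minimal sketch;
 * the saturation policy is just an example):
 *
 *	int old = atomic_read(v);
 *	for (;;) {
 *		int new = (old == INT_MAX) ? old : old + 1;
 *		int prev = atomic_cmpxchg(v, old, new);
 *		if (prev == old)
 *			break;
 *		old = prev;
 *	}
 */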

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, new, old;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l	%[old],%[mem]\n"
	"	cmpeq	%[old],%[u],%[c]\n"
	"	addl	%[old],%[a],%[new]\n"
	"	bne	%[c],2f\n"
	"	stl_c	%[new],%[mem]\n"
	"	beq	%[new],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"((long)u)
	: "memory");
	smp_mb();
	return old;
}
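
/*
 * A classic use is "take a reference unless the count has already
 * dropped to zero" (hedged sketch; tryget_example() is hypothetical):
 *
 *	static inline int tryget_example(atomic_t *count)
 *	{
 *		return __atomic_add_unless(count, 1, 0) != 0;
 *	}
 *
 * Because __atomic_add_unless() returns the old value, a zero return
 * means the count was already 0 and nothing was added.
 */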

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns true iff @v was not @u.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, tmp;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l	%[tmp],%[mem]\n"
	"	cmpeq	%[tmp],%[u],%[c]\n"
	"	addq	%[tmp],%[a],%[tmp]\n"
	"	bne	%[c],2f\n"
	"	stq_c	%[tmp],%[mem]\n"
	"	beq	%[tmp],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [tmp] "=&r"(tmp), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
	: "memory");
	smp_mb();
	return !c;
}

/**
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * Atomically decrements @v by 1 when the old value is positive.
 * Returns the old value of @v minus 1, even if @v was not decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long old, tmp;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l	%[old],%[mem]\n"
	"	subq	%[old],1,%[tmp]\n"
	"	ble	%[old],2f\n"
	"	stq_c	%[tmp],%[mem]\n"
	"	beq	%[tmp],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [tmp] "=&r"(tmp)
	: [mem] "m"(*v)
	: "memory");
	smp_mb();
	return old - 1;
}
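
/*
 * Usage sketch (illustrative; "credits" is a hypothetical counter):
 *
 *	if (atomic64_dec_if_positive(&credits) < 0)
 *		return -EBUSY;	(pool was empty; counter untouched)
 *
 * A negative return means the old value was <= 0 and no decrement
 * happened; a return >= 0 means one unit was consumed.
 */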

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))

#define atomic_inc_return(v) atomic_add_return(1,(v))
#define atomic64_inc_return(v) atomic64_add_return(1,(v))

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)

#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)

#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)

#define atomic_inc(v) atomic_add(1,(v))
#define atomic64_inc(v) atomic64_add(1,(v))

#define atomic_dec(v) atomic_sub(1,(v))
#define atomic64_dec(v) atomic64_sub(1,(v))
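
/*
 * The inc/dec wrappers compose in the usual way, e.g. the standard
 * refcounting idiom (obj->refcount and release_example() are
 * hypothetical):
 *
 *	if (atomic_dec_and_test(&obj->refcount))
 *		release_example(obj);
 */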

#endif /* _ALPHA_ATOMIC_H */