/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */
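
/*
 * Illustrative use only (not part of this header): a subsystem-private
 * counter such as
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	atomic_inc(&nr_users);
 *	...
 *	if (atomic_dec_return(&nr_users) == 0)
 *		last_user_gone();
 *
 * counts a shared resource without taking a lock around a plain int;
 * nr_users and last_user_gone() are hypothetical names.
 */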

/*
 * To ensure dependency ordering is preserved for the _relaxed and
 * _release atomics, an smp_read_barrier_depends() is unconditionally
 * inserted into the _relaxed variants, which are used to build the
 * barriered versions. To avoid redundant back-to-back fences, we can
 * define the _acquire and _fence versions explicitly.
 */
#define __atomic_op_acquire(op, args...)	op##_relaxed(args)
#define __atomic_op_fence			__atomic_op_release
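
/*
 * A sketch of what these overrides mean for callers, assuming the generic
 * wrappers in <linux/atomic.h> of this era:
 *
 *	atomic_add_return_acquire(i, v)
 *		expands to atomic_add_return_relaxed(i, v)
 *	atomic_add_return(i, v)
 *		expands to the _release form, i.e. roughly
 *		smp_mb__before_atomic(); atomic_add_return_relaxed(i, v);
 *
 * The smp_read_barrier_depends() at the end of every _relaxed body stands
 * in for the barrier that would otherwise have to follow the operation.
 */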

#define ATOMIC_INIT(i)		{ (i) }
#define ATOMIC64_INIT(i)	{ (i) }

#define atomic_read(v)		READ_ONCE((v)->counter)
#define atomic64_read(v)	READ_ONCE((v)->counter)

#define atomic_set(v,i)		WRITE_ONCE((v)->counter, (i))
#define atomic64_set(v,i)	WRITE_ONCE((v)->counter, (i))

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int i, atomic_t * v)			\
{									\
	unsigned long temp;						\
	__asm__ __volatile__(						\
	"1:	ldl_l %0,%1\n"						\
	"	" #asm_op " %0,%2,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter)				\
	:"Ir" (i), "m" (v->counter));					\
}									\

#define ATOMIC_OP_RETURN(op, asm_op)					\
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
{									\
	long temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldl_l %0,%1\n"						\
	"	" #asm_op " %0,%3,%2\n"					\
	"	" #asm_op " %0,%3,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_read_barrier_depends();					\
	return result;							\
}

#define ATOMIC_FETCH_OP(op, asm_op)					\
static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{									\
	long temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldl_l %2,%1\n"						\
	"	" #asm_op " %2,%3,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_read_barrier_depends();					\
	return result;							\
}

#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(long i, atomic64_t * v)		\
{									\
	unsigned long temp;						\
	__asm__ __volatile__(						\
	"1:	ldq_l %0,%1\n"						\
	"	" #asm_op " %0,%2,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter)				\
	:"Ir" (i), "m" (v->counter));					\
}									\

#define ATOMIC64_OP_RETURN(op, asm_op)					\
static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v)	\
{									\
	long temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldq_l %0,%1\n"						\
	"	" #asm_op " %0,%3,%2\n"					\
	"	" #asm_op " %0,%3,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_read_barrier_depends();					\
	return result;							\
}

#define ATOMIC64_FETCH_OP(op, asm_op)					\
static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v)	\
{									\
	long temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldq_l %2,%1\n"						\
	"	" #asm_op " %2,%3,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_read_barrier_depends();					\
	return result;							\
}

#define ATOMIC_OPS(op)							\
	ATOMIC_OP(op, op##l)						\
	ATOMIC_OP_RETURN(op, op##l)					\
	ATOMIC_FETCH_OP(op, op##l)					\
	ATOMIC64_OP(op, op##q)						\
	ATOMIC64_OP_RETURN(op, op##q)					\
	ATOMIC64_FETCH_OP(op, op##q)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)
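
/*
 * Each invocation above therefore provides, per operation: atomic_<op>(),
 * atomic_<op>_return_relaxed() and atomic_fetch_<op>_relaxed(), plus the
 * atomic64_* counterparts; e.g. ATOMIC_OPS(add) yields atomic_add(),
 * atomic_add_return_relaxed(), atomic_fetch_add_relaxed(), atomic64_add(),
 * atomic64_add_return_relaxed() and atomic64_fetch_add_relaxed().
 */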

#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed

#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed

#define atomic_andnot atomic_andnot
#define atomic64_andnot atomic64_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm)						\
	ATOMIC_OP(op, asm)						\
	ATOMIC_FETCH_OP(op, asm)					\
	ATOMIC64_OP(op, asm)						\
	ATOMIC64_FETCH_OP(op, asm)

ATOMIC_OPS(and, and)
ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, bis)
ATOMIC_OPS(xor, xor)
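
/*
 * In the asm mnemonics above, Alpha's "bic" is and-with-complement (hence
 * its use for andnot) and "bis" is the logical-or instruction.
 */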

#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_andnot_relaxed	atomic_fetch_andnot_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed

#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
#define atomic64_fetch_andnot_relaxed	atomic64_fetch_andnot_relaxed
#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
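
/*
 * Illustrative use only: the usual cmpxchg() retry loop, here as a
 * hypothetical saturating increment (example_saturating_inc() is not a
 * real helper):
 *
 *	static inline void example_saturating_inc(atomic_t *v)
 *	{
 *		int old = atomic_read(v);
 *
 *		while (old != INT_MAX) {
 *			int seen = atomic_cmpxchg(v, old, old + 1);
 *
 *			if (seen == old)
 *				break;
 *			old = seen;
 *		}
 *	}
 */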

/**
 * atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int c, new, old;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l	%[old],%[mem]\n"
	"	cmpeq	%[old],%[u],%[c]\n"
	"	addl	%[old],%[a],%[new]\n"
	"	bne	%[c],2f\n"
	"	stl_c	%[new],%[mem]\n"
	"	beq	%[new],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"((long)u)
	: "memory");
	smp_mb();
	return old;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
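
/*
 * Typical use (a sketch, not a definition from this file): take a reference
 * only while the object is still live, i.e. the inc-not-zero pattern, with
 * obj->refcnt as a hypothetical atomic_t field:
 *
 *	if (atomic_fetch_add_unless(&obj->refcnt, 1, 0) == 0)
 *		return NULL;
 *
 * A zero return means the object was already being torn down and no
 * reference was taken.  The generic atomic_add_unless() and
 * atomic_inc_not_zero() helpers are built on this primitive.
 */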

/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
{
	long c, new, old;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l	%[old],%[mem]\n"
	"	cmpeq	%[old],%[u],%[c]\n"
	"	addq	%[old],%[a],%[new]\n"
	"	bne	%[c],2f\n"
	"	stq_c	%[new],%[mem]\n"
	"	beq	%[new],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
	: "memory");
	smp_mb();
	return old;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long old, tmp;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l	%[old],%[mem]\n"
	"	subq	%[old],1,%[tmp]\n"
	"	ble	%[old],2f\n"
	"	stq_c	%[tmp],%[mem]\n"
	"	beq	%[tmp],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [tmp] "=&r"(tmp)
	: [mem] "m"(*v)
	: "memory");
	smp_mb();
	return old - 1;
}
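
/*
 * Illustrative use only: a counting-semaphore style "try down" that must
 * not drive the count negative (example_try_down() is a hypothetical name):
 *
 *	static inline bool example_try_down(atomic64_t *count)
 *	{
 *		return atomic64_dec_if_positive(count) >= 0;
 *	}
 *
 * A negative result means the counter was already at or below zero and was
 * left unchanged.
 */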

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))

#define atomic_inc_return(v) atomic_add_return(1,(v))
#define atomic64_inc_return(v) atomic64_add_return(1,(v))

#define atomic_inc(v) atomic_add(1,(v))
#define atomic64_inc(v) atomic64_add(1,(v))

#define atomic_dec(v) atomic_sub(1,(v))
#define atomic64_dec(v) atomic64_sub(1,(v))

#endif /* _ALPHA_ATOMIC_H */