/*
 * Copyright IBM Corp. 1999, 2009
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Denis Joseph Barrow,
 *	      Arnd Bergmann <arndb@de.ibm.com>,
 *
 * Atomic operations that C can't guarantee for us.
 * Useful for resource counting etc.
 * s390 uses 'Compare And Swap' for atomicity in an SMP environment.
 *
 */

#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)  { (i) }
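/* Usage example (name is illustrative): static atomic_t refcnt = ATOMIC_INIT(1); */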

#define __ATOMIC_NO_BARRIER	"\n"

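/*
 * Two implementations follow: with the z196 interlocked-access
 * facility each operation is a single instruction that atomically
 * updates the counter and returns its old value; older machines
 * fall back to a compare-and-swap loop.
 */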
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __ATOMIC_OR	"lao"
#define __ATOMIC_AND	"lan"
#define __ATOMIC_ADD	"laa"
#define __ATOMIC_XOR	"lax"
#define __ATOMIC_BARRIER "bcr	14,0\n"

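/*
 * No retry loop is needed here: lao/lan/laa/lax update memory and
 * return the old value in a single interlocked instruction; the
 * optional "bcr 14,0" (fast-BCR-serialization) supplies the barrier.
 */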
#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)		\
({									\
	int old_val;							\
									\
	typecheck(atomic_t *, ptr);					\
	asm volatile(							\
		op_string "	%0,%2,%1\n"				\
		__barrier						\
		: "=d" (old_val), "+Q" ((ptr)->counter)			\
		: "d" (op_val)						\
		: "cc", "memory");					\
	old_val;							\
})

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define __ATOMIC_OR	"or"
#define __ATOMIC_AND	"nr"
#define __ATOMIC_ADD	"ar"
#define __ATOMIC_XOR	"xr"
#define __ATOMIC_BARRIER "\n"

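/*
 * Pre-z196 fallback: load the counter, apply the operation to a copy
 * and compare-and-swap the result back; "jl 0b" retries whenever
 * another CPU changed the counter in the meantime.
 */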
#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)		\
({									\
	int old_val, new_val;						\
									\
	typecheck(atomic_t *, ptr);					\
	asm volatile(							\
		"	l	%0,%2\n"				\
		"0:	lr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	cs	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
		: "d" (op_val)						\
		: "cc", "memory");					\
	old_val;							\
})

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

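/*
 * Aligned 4-byte loads and stores are atomic on s390, so a single
 * l/st instruction suffices for read and set.
 */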
static inline int atomic_read(const atomic_t *v)
{
	int c;

	asm volatile(
		"	l	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic_set(atomic_t *v, int i)
{
	asm volatile(
		"	st	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) + i;
}

static inline int atomic_fetch_add(int i, atomic_t *v)
{
	return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER);
}

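/*
 * For compile-time constants that fit a signed byte, the z196 path
 * uses "asi" (add signed immediate to storage) instead of loading
 * the delta into a register first; hence the -128..127 range check.
 */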
static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
		asm volatile(
			"asi	%0,%1\n"
			: "+Q" (v->counter)
			: "i" (i)
			: "cc", "memory");
		return;
	}
#endif
	__ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_NO_BARRIER);
}

#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)			atomic_add(1, _v)
#define atomic_inc_return(_v)		atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)
#define atomic_sub(_i, _v)		atomic_add(-(int)(_i), _v)
#define atomic_sub_return(_i, _v)	atomic_add_return(-(int)(_i), _v)
#define atomic_fetch_sub(_i, _v)	atomic_fetch_add(-(int)(_i), _v)
#define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)			atomic_sub(1, _v)
#define atomic_dec_return(_v)		atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)

#define ATOMIC_OPS(op, OP)						\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_NO_BARRIER);	\
}									\
static inline int atomic_fetch_##op(int i, atomic_t *v)			\
{									\
	return __ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_BARRIER);	\
}

ATOMIC_OPS(and, AND)
ATOMIC_OPS(or, OR)
ATOMIC_OPS(xor, XOR)

#undef ATOMIC_OPS

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

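/*
 * "cs" is the native compare-and-swap: v->counter is replaced by
 * "new" only if it still equals "old"; either way the value found
 * in memory is returned.
 */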
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	asm volatile(
		"	cs	%0,%2,%1"
		: "+d" (old), "+Q" (v->counter)
		: "d" (new)
		: "cc", "memory");
	return old;
}

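/*
 * Add @a to @v unless @v equals @u.  Returns the last value read
 * from @v, so the caller can tell whether the add happened
 * (result != @u).  E.g. (illustrative): __atomic_add_unless(&cnt, 1, 0)
 * takes a reference only if the count is not already zero.
 */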
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}


#undef __ATOMIC_LOOP

#define ATOMIC64_INIT(i)  { (i) }

#define __ATOMIC64_NO_BARRIER	"\n"

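/*
 * The 64-bit variants below mirror the 32-bit implementation above,
 * using the "g" instruction forms (laag/lang/laog/laxg, lg/stg, csg).
 */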
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __ATOMIC64_OR	"laog"
#define __ATOMIC64_AND	"lang"
#define __ATOMIC64_ADD	"laag"
#define __ATOMIC64_XOR	"laxg"
#define __ATOMIC64_BARRIER "bcr	14,0\n"

#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)		\
({									\
	long long old_val;						\
									\
	typecheck(atomic64_t *, ptr);					\
	asm volatile(							\
		op_string "	%0,%2,%1\n"				\
		__barrier						\
		: "=d" (old_val), "+Q" ((ptr)->counter)			\
		: "d" (op_val)						\
		: "cc", "memory");					\
	old_val;							\
})

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define __ATOMIC64_OR	"ogr"
#define __ATOMIC64_AND	"ngr"
#define __ATOMIC64_ADD	"agr"
#define __ATOMIC64_XOR	"xgr"
#define __ATOMIC64_BARRIER "\n"

#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)		\
({									\
	long long old_val, new_val;					\
									\
	typecheck(atomic64_t *, ptr);					\
	asm volatile(							\
		"	lg	%0,%2\n"				\
		"0:	lgr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	csg	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
		: "d" (op_val)						\
		: "cc", "memory");					\
	old_val;							\
})

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline long long atomic64_read(const atomic64_t *v)
{
	long long c;

	asm volatile(
		"	lg	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	asm volatile(
		"	stg	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER) + i;
}

static inline long long atomic64_fetch_add(long long i, atomic64_t *v)
{
	return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER);
}

static inline void atomic64_add(long long i, atomic64_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
		asm volatile(
			"agsi	%0,%1\n"
			: "+Q" (v->counter)
			: "i" (i)
			: "cc", "memory");
		return;
	}
#endif
	__ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline long long atomic64_cmpxchg(atomic64_t *v,
					     long long old, long long new)
{
	asm volatile(
		"	csg	%0,%2,%1"
		: "+d" (old), "+Q" (v->counter)
		: "d" (new)
		: "cc", "memory");
	return old;
}

#define ATOMIC64_OPS(op, OP)						\
static inline void atomic64_##op(long i, atomic64_t *v)			\
{									\
	__ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_NO_BARRIER);	\
}									\
static inline long atomic64_fetch_##op(long i, atomic64_t *v)		\
{									\
	return __ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_BARRIER); \
}

ATOMIC64_OPS(and, AND)
ATOMIC64_OPS(or, OR)
ATOMIC64_OPS(xor, XOR)

#undef ATOMIC64_OPS
#undef __ATOMIC64_LOOP

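/*
 * Unlike __atomic_add_unless() above, this returns a boolean:
 * non-zero if the add was performed.
 */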
static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
{
	long long c, old;

	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic64_cmpxchg(v, c, c + i);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}

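/*
 * Decrement @v only if the result stays non-negative.  Returns the
 * decremented value, or a negative value if @v was left unchanged.
 */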
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long c, old, dec;

	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}

#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)		atomic64_add(1, _v)
#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(long long)(_i), _v)
#define atomic64_fetch_sub(_i, _v)	atomic64_fetch_add(-(long long)(_i), _v)
#define atomic64_sub(_i, _v)		atomic64_add(-(long long)(_i), _v)
#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)		atomic64_sub(1, _v)
#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)

#endif /* __ARCH_S390_ATOMIC__  */