/*
 * Copyright IBM Corp. 1999, 2009
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Denis Joseph Barrow,
 *	      Arnd Bergmann <arndb@de.ibm.com>,
 *
 * Atomic operations that C can't guarantee us.
 * Useful for resource counting etc.
 * s390 uses 'Compare And Swap' for atomicity in an SMP environment.
 *
 */

#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)  { (i) }

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

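/*
 * The z196 interlocked-access facility provides "load and ..."
 * instructions (lao/lan/laa below) that atomically update the word
 * in memory and return its old value in a single instruction, so no
 * compare-and-swap retry loop is needed.
 */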
#define __ATOMIC_OR	"lao"
#define __ATOMIC_AND	"lan"
#define __ATOMIC_ADD	"laa"

#define __ATOMIC_LOOP(ptr, op_val, op_string)				\
({									\
	int old_val;							\
	asm volatile(							\
		op_string "	%0,%2,%1\n"				\
		: "=d" (old_val), "+Q" (((atomic_t *)(ptr))->counter)	\
		: "d" (op_val)						\
		: "cc", "memory");					\
	old_val;							\
})

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define __ATOMIC_OR	"or"
#define __ATOMIC_AND	"nr"
#define __ATOMIC_ADD	"ar"

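/*
 * Pre-z196 machines have no interlocked-update instructions, so the
 * loop below loads the old value, computes the new value in a
 * register and stores it back with "compare and swap" (cs).  cs only
 * succeeds if the word in memory still holds the old value; on a
 * mismatch it reloads %0 and "jl 0b" retries.
 */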
#define __ATOMIC_LOOP(ptr, op_val, op_string)				\
({									\
	int old_val, new_val;						\
	asm volatile(							\
		"	l	%0,%2\n"				\
		"0:	lr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	cs	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=Q" (((atomic_t *)(ptr))->counter)			\
		: "d" (op_val), "Q" (((atomic_t *)(ptr))->counter)	\
		: "cc", "memory");					\
	old_val;							\
})

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

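/*
 * atomic_read()/atomic_set() use an explicit load/store instruction
 * so that the counter is accessed with a single instruction and the
 * access cannot be torn or elided by the compiler.
 */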
static inline int atomic_read(const atomic_t *v)
{
	int c;

	asm volatile(
		"	l	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic_set(atomic_t *v, int i)
{
	asm volatile(
		"	st	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	return __ATOMIC_LOOP(v, i, __ATOMIC_ADD) + i;
}

#define atomic_add(_i, _v)		atomic_add_return(_i, _v)
#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)			atomic_add_return(1, _v)
#define atomic_inc_return(_v)		atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)
#define atomic_sub_return(_i, _v)	atomic_add_return(-(int)(_i), _v)
#define atomic_sub(_i, _v)		atomic_sub_return(_i, _v)
#define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)			atomic_sub_return(1, _v)
#define atomic_dec_return(_v)		atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)
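
/*
 * Typical usage, e.g. a minimal reference count built on these
 * primitives (put_object() and free_object() are hypothetical
 * helpers, not part of this header):
 *
 *	static atomic_t refcnt = ATOMIC_INIT(1);
 *
 *	static void put_object(void)
 *	{
 *		if (atomic_dec_and_test(&refcnt))
 *			free_object();
 *	}
 */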

static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
{
	__ATOMIC_LOOP(v, ~mask, __ATOMIC_AND);
}

static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
{
	__ATOMIC_LOOP(v, mask, __ATOMIC_OR);
}

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

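/*
 * cs compares "old" with v->counter: on a match it stores "new", on a
 * mismatch it loads the current counter value into "old".  Either
 * way, the returned value is what was found in memory.
 */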
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	asm volatile(
		"	cs	%0,%2,%1"
		: "+d" (old), "=Q" (v->counter)
		: "d" (new), "Q" (v->counter)
		: "cc", "memory");
	return old;
}

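/*
 * Atomically add "a" to v, unless v already holds "u"; returns the
 * value of v before the add.  The generic wrappers in
 * <linux/atomic.h> build atomic_add_unless() and
 * atomic_inc_not_zero() on top of this.
 */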
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}

#undef __ATOMIC_LOOP

#define ATOMIC64_INIT(i)  { (i) }

#ifdef CONFIG_64BIT

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __ATOMIC64_OR	"laog"
#define __ATOMIC64_AND	"lang"
#define __ATOMIC64_ADD	"laag"

#define __ATOMIC64_LOOP(ptr, op_val, op_string)				\
({									\
	long long old_val;						\
	asm volatile(							\
		op_string "	%0,%2,%1\n"				\
		: "=d" (old_val), "+Q" (((atomic_t *)(ptr))->counter)	\
		: "d" (op_val)						\
		: "cc", "memory");					\
	old_val;							\
})

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define __ATOMIC64_OR	"ogr"
#define __ATOMIC64_AND	"ngr"
#define __ATOMIC64_ADD	"agr"

#define __ATOMIC64_LOOP(ptr, op_val, op_string)				\
({									\
	long long old_val, new_val;					\
	asm volatile(							\
		"	lg	%0,%2\n"				\
		"0:	lgr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	csg	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=Q" (((atomic_t *)(ptr))->counter)			\
		: "d" (op_val),	"Q" (((atomic_t *)(ptr))->counter)	\
		: "cc", "memory");					\
	old_val;							\
})

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline long long atomic64_read(const atomic64_t *v)
{
	long long c;

	asm volatile(
		"	lg	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	asm volatile(
		"	stg	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD) + i;
}

static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
{
	__ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND);
}

static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
{
	__ATOMIC64_LOOP(v, mask, __ATOMIC64_OR);
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline long long atomic64_cmpxchg(atomic64_t *v,
					     long long old, long long new)
{
	asm volatile(
		"	csg	%0,%2,%1"
		: "+d" (old), "=Q" (v->counter)
		: "d" (new), "Q" (v->counter)
		: "cc", "memory");
	return old;
}

#undef __ATOMIC64_LOOP

#else /* CONFIG_64BIT */

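/*
 * 31-bit machines have no 64-bit arithmetic instructions, but
 * "compare double and swap" (cds) can still update an aligned 8-byte
 * value atomically via an even/odd register pair (register_pair,
 * addressed as %0/%N0 in the asm below).
 */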
typedef struct {
	long long counter;
} atomic64_t;

static inline long long atomic64_read(const atomic64_t *v)
{
	register_pair rp;

	asm volatile(
		"	lm	%0,%N0,%1"
		: "=&d" (rp) : "Q" (v->counter)	);
	return rp.pair;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	register_pair rp = {.pair = i};

	asm volatile(
		"	stm	%1,%N1,%0"
		: "=Q" (v->counter) : "d" (rp) );
}

static inline long long atomic64_xchg(atomic64_t *v, long long new)
{
	register_pair rp_new = {.pair = new};
	register_pair rp_old;

	asm volatile(
		"	lm	%0,%N0,%1\n"
		"0:	cds	%0,%2,%1\n"
		"	jl	0b\n"
		: "=&d" (rp_old), "=Q" (v->counter)
		: "d" (rp_new), "Q" (v->counter)
		: "cc");
	return rp_old.pair;
}

static inline long long atomic64_cmpxchg(atomic64_t *v,
					 long long old, long long new)
{
	register_pair rp_old = {.pair = old};
	register_pair rp_new = {.pair = new};

	asm volatile(
		"	cds	%0,%2,%1"
		: "+&d" (rp_old), "=Q" (v->counter)
		: "d" (rp_new), "Q" (v->counter)
		: "cc");
	return rp_old.pair;
}

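/*
 * The remaining 64-bit operations are cmpxchg loops: read the old
 * value, compute the new one and retry until cds confirms that no
 * one modified the counter in between.
 */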
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old + i;
	} while (atomic64_cmpxchg(v, old, new) != old);
	return new;
}

static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old | mask;
	} while (atomic64_cmpxchg(v, old, new) != old);
}

static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old & mask;
	} while (atomic64_cmpxchg(v, old, new) != old);
}

#endif /* CONFIG_64BIT */

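/*
 * Unlike __atomic_add_unless() above, this returns a boolean:
 * nonzero if the add was performed, 0 if v already held "u".
 */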
static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	long long c, old;

	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic64_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}

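/*
 * Decrement v unless the result would be negative.  Returns the
 * decremented value; if v was already <= 0 the counter is left
 * unchanged and the negative would-be result is returned.
 */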
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long c, old, dec;

	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}

#define atomic64_add(_i, _v)		atomic64_add_return(_i, _v)
#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(long long)(_i), _v)
#define atomic64_sub(_i, _v)		atomic64_sub_return(_i, _v)
#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#endif /* __ARCH_S390_ATOMIC__  */