#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>

/*
 *  include/asm-s390/atomic.h
 *
 *  S390 version
 *    Copyright (C) 1999-2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow,
 *               Arnd Bergmann (arndb@de.ibm.com)
 *
 *  Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 *
 */

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 * S390 uses 'Compare And Swap' for atomicity in an SMP environment.
 */

#define ATOMIC_INIT(i)  { (i) }
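
/*
 * Illustrative usage (a sketch, not part of the original header;
 * last_user_gone() is a hypothetical callback):
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	atomic_inc(&nr_users);
 *	...
 *	if (atomic_dec_and_test(&nr_users))
 *		last_user_gone();
 */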

#ifdef __KERNEL__

#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)

#define __CS_LOOP(ptr, op_val, op_string) ({				\
	typeof(ptr->counter) old_val, new_val;				\
	asm volatile(							\
		"	l	%0,%2\n"				\
		"0:	lr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	cs	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=Q" (((atomic_t *)(ptr))->counter)			\
		: "d" (op_val),	 "Q" (((atomic_t *)(ptr))->counter)	\
		: "cc", "memory");					\
	new_val;							\
})

#else /* __GNUC__ */

#define __CS_LOOP(ptr, op_val, op_string) ({				\
	typeof(ptr->counter) old_val, new_val;				\
	asm volatile(							\
		"	l	%0,0(%3)\n"				\
		"0:	lr	%1,%0\n"				\
		op_string "	%1,%4\n"				\
		"	cs	%0,%1,0(%3)\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=m" (((atomic_t *)(ptr))->counter)			\
		: "a" (ptr), "d" (op_val),				\
		  "m" (((atomic_t *)(ptr))->counter)			\
		: "cc", "memory");					\
	new_val;							\
})

#endif /* __GNUC__ */
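
/*
 * In C terms, __CS_LOOP() behaves like the sketch below (illustrative
 * only; cas() is a hypothetical stand-in for the interlocked COMPARE
 * AND SWAP instruction):
 *
 *	old_val = ptr->counter;
 *	do {
 *		new_val = old_val <op> op_val;
 *	} while (!cas(&ptr->counter, &old_val, new_val));
 *	return new_val;
 *
 * CS stores new_val only if the counter still holds old_val; on
 * failure the current value is reloaded into old_val and "jl 0b"
 * retries with the fresh value.
 */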

static inline int atomic_read(const atomic_t *v)
{
	barrier();
	return v->counter;
}

static inline void atomic_set(atomic_t *v, int i)
{
	v->counter = i;
	barrier();
}

static __inline__ int atomic_add_return(int i, atomic_t * v)
{
	return __CS_LOOP(v, i, "ar");
}
#define atomic_add(_i, _v)		atomic_add_return(_i, _v)
#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)			atomic_add_return(1, _v)
#define atomic_inc_return(_v)		atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)

static __inline__ int atomic_sub_return(int i, atomic_t * v)
{
	return __CS_LOOP(v, i, "sr");
}
#define atomic_sub(_i, _v)		atomic_sub_return(_i, _v)
#define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)			atomic_sub_return(1, _v)
#define atomic_dec_return(_v)		atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)

static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t * v)
{
	__CS_LOOP(v, ~mask, "nr");
}

static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v)
{
	__CS_LOOP(v, mask, "or");
}

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
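
/*
 * atomic_cmpxchg(): if v->counter equals old, atomically replace it
 * with new. Either way the previous counter value is returned, so the
 * caller can tell whether the swap took place (result == old).
 */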

static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
{
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
	asm volatile(
		"	cs	%0,%2,%1"
		: "+d" (old), "=Q" (v->counter)
		: "d" (new), "Q" (v->counter)
		: "cc", "memory");
#else /* __GNUC__ */
	asm volatile(
		"	cs	%0,%3,0(%2)"
		: "+d" (old), "=m" (v->counter)
		: "a" (v), "d" (new), "m" (v->counter)
		: "cc", "memory");
#endif /* __GNUC__ */
	return old;
}

static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
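
/*
 * Illustrative use of atomic_inc_not_zero() (a sketch; struct obj and
 * obj_get() are hypothetical): take a reference only while the object
 * is still live, i.e. while its reference count is non-zero:
 *
 *	static int obj_get(struct obj *o)
 *	{
 *		return atomic_inc_not_zero(&o->refcount);
 *	}
 *
 * A zero return means the count had already dropped to zero and the
 * object must not be touched.
 */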

#undef __CS_LOOP

#ifdef __s390x__
#define ATOMIC64_INIT(i)  { (i) }

#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)

#define __CSG_LOOP(ptr, op_val, op_string) ({				\
	typeof(ptr->counter) old_val, new_val;				\
	asm volatile(							\
		"	lg	%0,%2\n"				\
		"0:	lgr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	csg	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=Q" (((atomic_t *)(ptr))->counter)			\
		: "d" (op_val),	"Q" (((atomic_t *)(ptr))->counter)	\
		: "cc", "memory" );					\
	new_val;							\
})

#else /* __GNUC__ */

#define __CSG_LOOP(ptr, op_val, op_string) ({				\
	typeof(ptr->counter) old_val, new_val;				\
	asm volatile(							\
		"	lg	%0,0(%3)\n"				\
		"0:	lgr	%1,%0\n"				\
		op_string "	%1,%4\n"				\
		"	csg	%0,%1,0(%3)\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=m" (((atomic_t *)(ptr))->counter)			\
		: "a" (ptr), "d" (op_val),				\
		  "m" (((atomic_t *)(ptr))->counter)			\
		: "cc", "memory" );					\
	new_val;							\
})

#endif /* __GNUC__ */
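
/*
 * __CSG_LOOP() mirrors __CS_LOOP() above, using the 64-bit LG/LGR/CSG
 * instructions in place of L/LR/CS.
 */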

static inline long long atomic64_read(const atomic64_t *v)
{
	barrier();
	return v->counter;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	v->counter = i;
	barrier();
}

static __inline__ long long atomic64_add_return(long long i, atomic64_t * v)
{
	return __CSG_LOOP(v, i, "agr");
}
#define atomic64_add(_i, _v)		atomic64_add_return(_i, _v)
#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)

static __inline__ long long atomic64_sub_return(long long i, atomic64_t * v)
{
	return __CSG_LOOP(v, i, "sgr");
}
#define atomic64_sub(_i, _v)		atomic64_sub_return(_i, _v)
#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)

static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t * v)
{
	__CSG_LOOP(v, ~mask, "ngr");
}

static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v)
{
	__CSG_LOOP(v, mask, "ogr");
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static __inline__ long long atomic64_cmpxchg(atomic64_t *v,
					     long long old, long long new)
{
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
	asm volatile(
		"	csg	%0,%2,%1"
		: "+d" (old), "=Q" (v->counter)
		: "d" (new), "Q" (v->counter)
		: "cc", "memory");
#else /* __GNUC__ */
	asm volatile(
		"	csg	%0,%3,0(%2)"
		: "+d" (old), "=m" (v->counter)
		: "a" (v), "d" (new), "m" (v->counter)
		: "cc", "memory");
#endif /* __GNUC__ */
	return old;
}

static __inline__ int atomic64_add_unless(atomic64_t *v,
					  long long a, long long u)
{
	long long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic64_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#undef __CSG_LOOP
#endif /* __s390x__ */

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#include <asm-generic/atomic-long.h>
#endif /* __KERNEL__ */
#endif /* __ARCH_S390_ATOMIC__  */