/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
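
/* Illustrative sketch, not part of the original header: assuming 64-byte
 * L1 cachelines and the ATOMIC_HASH_SIZE of 4 above, two counters in the
 * same cacheline hash to the same lock, while counters one cacheline apart
 * hash to different ones:
 *
 *	ATOMIC_HASH((void *)0x1040) == ATOMIC_HASH((void *)0x1044)
 *	ATOMIC_HASH((void *)0x1040) != ATOMIC_HASH((void *)0x1080)
 *
 * since (0x1040 / 64) & 3 == (0x1044 / 64) & 3 == 1 but (0x1080 / 64) & 3 == 2.
 */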

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);			\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)
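
/* A minimal usage sketch (illustrative only, not part of the original
 * header). In the SMP case, the atomic ops below use these helpers
 * roughly as follows: hash the counter's address to a lock, disable
 * interrupts, update under the lock, then unlock and restore:
 *
 *	unsigned long flags;
 *	_atomic_spin_lock_irqsave(v, flags);
 *	v->counter += i;
 *	_atomic_spin_unlock_irqrestore(v, flags);
 */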


#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ int atomic_read(const atomic_t *v)
{
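	/* Aligned word reads are atomic on parisc, so no lock is taken here;
	 * ACCESS_ONCE() only keeps the compiler from caching or tearing the
	 * load. */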
	return ACCESS_ONCE((v)->counter);
}

/* exported interface */
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
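
/* For context, and only as a sketch of the generic code of this era (not
 * part of this file): include/linux/atomic.h builds atomic_add_unless(v, a, u)
 * as __atomic_add_unless(v, a, u) != u, and atomic_inc_not_zero(v) as
 * atomic_add_unless(v, 1, 0), so an old value different from @u means the
 * add actually happened.
 */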

#define ATOMIC_OP(op, c_op)						\
static __inline__ void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}									\

#define ATOMIC_OP_RETURN(op, c_op)					\
static __inline__ int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)

ATOMIC_OP(and, &=)
ATOMIC_OP(or, |=)
ATOMIC_OP(xor, ^=)
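
/* For reference: the two ATOMIC_OPS() lines above expand into atomic_add(),
 * atomic_add_return(), atomic_sub() and atomic_sub_return(); the three
 * ATOMIC_OP() lines expand into the void-returning atomic_and(), atomic_or()
 * and atomic_xor().
 */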

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_inc(v)	(atomic_add(   1,(v)))
#define atomic_dec(v)	(atomic_add(  -1,(v)))

#define atomic_inc_return(v)	(atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(atomic_add_return(  -1,(v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)

#define ATOMIC_INIT(i)	{ (i) }

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) { (i) }

#define ATOMIC64_OP(op, c_op)						\
static __inline__ void atomic64_##op(s64 i, atomic64_t *v)		\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}									\

#define ATOMIC64_OP_RETURN(op, c_op)					\
static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_OPS(op, c_op) ATOMIC64_OP(op, c_op) ATOMIC64_OP_RETURN(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)
ATOMIC64_OP(and, &=)
ATOMIC64_OP(or, |=)
ATOMIC64_OP(xor, ^=)
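
/* Likewise, the 64-bit variants generated above: atomic64_add(),
 * atomic64_add_return(), atomic64_sub(), atomic64_sub_return(), and the
 * void-returning atomic64_and(), atomic64_or() and atomic64_xor().
 */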

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return ACCESS_ONCE((v)->counter);
}

#define atomic64_inc(v)		(atomic64_add(   1,(v)))
#define atomic64_dec(v)		(atomic64_add(  -1,(v)))

#define atomic64_inc_return(v)		(atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)		(atomic64_add_return(  -1,(v)))

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v) 	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)

/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long c, old, dec;
	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}

#endif /* CONFIG_64BIT */


#endif /* _ASM_PARISC_ATOMIC_H_ */