/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
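/*
 * For illustration, assuming a 64-byte L1 cache line and the hypothetical
 * addresses below (neither is taken from this header):
 *
 *	atomic_t *a = (atomic_t *)0x1000;	first cache line
 *	atomic_t *b = (atomic_t *)0x1004;	same line as a
 *	atomic_t *c = (atomic_t *)0x1040;	next cache line
 *
 *	ATOMIC_HASH(a) == ATOMIC_HASH(b)	(0x1000/64) & 3 == 0
 *	ATOMIC_HASH(a) != ATOMIC_HASH(c)	(0x1040/64) & 3 == 1
 *
 * so counters that share a cache line share a lock, while counters in
 * different lines usually hash to different locks.
 */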

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);			\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)


#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif
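/*
 * In the !CONFIG_SMP branch above no lock is taken at all: with a single
 * CPU, masking local interrupts is already enough to make the
 * load/modify/store sequences below appear atomic.
 */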

/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}
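/*
 * atomic_set() must still take the hashed lock: an unlocked store could
 * land in the middle of another CPU's locked read-modify-write (the
 * ATOMIC_OP routines below) and be overwritten by that operation's
 * stale result.
 */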

#define atomic_set_release(v, i)	atomic_set((v), (i))

static __inline__ int atomic_read(const atomic_t *v)
{
	return READ_ONCE((v)->counter);
}

/* exported interface */
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
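/*
 * Typical use of the exported interface is a compare-and-swap retry
 * loop. A minimal sketch (the helper name is made up for illustration
 * and is not defined by this header):
 *
 *	static inline int example_add_unless(atomic_t *v, int a, int u)
 *	{
 *		int c = atomic_read(v);
 *
 *		while (c != u) {
 *			int old = atomic_cmpxchg(v, c, c + a);
 *			if (old == c)
 *				return 1;	(exchange succeeded)
 *			c = old;		(lost the race, retry)
 *		}
 *		return 0;
 *	}
 */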

#define ATOMIC_OP(op, c_op)						\
static __inline__ void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}									\

#define ATOMIC_OP_RETURN(op, c_op)					\
static __inline__ int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static __inline__ int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = v->counter;						\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_OP_RETURN(op, c_op)					\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)
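/*
 * The two expansions above generate atomic_add(), atomic_add_return(),
 * atomic_fetch_add(), atomic_sub(), atomic_sub_return() and
 * atomic_fetch_sub(), all taking (int i, atomic_t *v) and all guarded by
 * _atomic_spin_lock_irqsave()/_atomic_spin_unlock_irqrestore().
 */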

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(and, &=)
ATOMIC_OPS(or, |=)
ATOMIC_OPS(xor, ^=)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_inc(v)	(atomic_add(   1,(v)))
#define atomic_dec(v)	(atomic_add(  -1,(v)))

#define atomic_inc_return(v)	(atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(atomic_add_return(  -1,(v)))

#define ATOMIC_INIT(i)	{ (i) }
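/* e.g., with a hypothetical counter:  static atomic_t example_users = ATOMIC_INIT(0); */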

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) { (i) }

#define ATOMIC64_OP(op, c_op)						\
static __inline__ void atomic64_##op(s64 i, atomic64_t *v)		\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}									\

#define ATOMIC64_OP_RETURN(op, c_op)					\
static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_FETCH_OP(op, c_op)					\
static __inline__ s64 atomic64_fetch_##op(s64 i, atomic64_t *v)		\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = v->counter;						\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return READ_ONCE((v)->counter);
}

#define atomic64_inc(v)		(atomic64_add(   1,(v)))
#define atomic64_dec(v)		(atomic64_add(  -1,(v)))

#define atomic64_inc_return(v)		(atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)		(atomic64_add_return(  -1,(v)))

/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long c, old, dec;
	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}
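/*
 * For example: if *v is 3 it becomes 2 and 2 is returned; if *v is 0 it
 * is left untouched and -1 is returned, so callers simply test the
 * return value with "< 0".
 */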

#endif /* CONFIG_64BIT */


#endif /* _ASM_PARISC_ATOMIC_H_ */