/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/atomic.h>

/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable.  Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS	16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
	raw_spinlock_t lock;
	char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
	[0 ... (NR_LOCKS - 1)] = {
		.lock =  __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
	},
};

static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

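	/*
	 * Drop the bits that address within a cacheline, then XOR-fold
	 * higher address bits in so that the final index into
	 * atomic64_lock[] depends on more than just the low bits.
	 */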
	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}

long long atomic64_read(const atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_read);

void atomic64_set(atomic64_t *v, long long i)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter = i;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_set);

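/*
 * The macros below generate the arithmetic ops: ATOMIC64_OP() emits
 * the void atomic64_##op() variant and ATOMIC64_OP_RETURN() the
 * variant that returns the new value, each taking the hashed spinlock
 * around a plain C operation on v->counter.
 */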
#define ATOMIC64_OP(op, c_op)						\
void atomic64_##op(long long a, atomic64_t *v)				\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
}									\
EXPORT_SYMBOL(atomic64_##op);

#define ATOMIC64_OP_RETURN(op, c_op)					\
long long atomic64_##op##_return(long long a, atomic64_t *v)		\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	long long val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = (v->counter c_op a);					\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(atomic64_##op##_return);

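/*
 * ATOMIC64_FETCH_OP() emits atomic64_fetch_##op(), which returns the
 * value of v from before the operation, unlike ##op##_return above,
 * which returns the new value.
 */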
#define ATOMIC64_FETCH_OP(op, c_op)					\
long long atomic64_fetch_##op(long long a, atomic64_t *v)		\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	long long val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = v->counter;						\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(atomic64_fetch_##op);

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

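/*
 * Decrement v only if the result stays non-negative.  The return value
 * is v - 1 whether or not the store was done, so a negative return
 * means v was left untouched.
 */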
long long atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter - 1;
	if (val >= 0)
		v->counter = val;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_dec_if_positive);

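/*
 * Compare-and-exchange: store n into v only if v currently holds o.
 * Returns the value of v from before the (possible) store, so the
 * caller can detect success by comparing the return value against o.
 */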
long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val == o)
		v->counter = n;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_cmpxchg);

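/*
 * Unconditionally replace v's value with new, returning the old value.
 */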
long long atomic64_xchg(atomic64_t *v, long long new)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	v->counter = new;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_xchg);

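/*
 * Add a to v unless v currently holds u; returns 1 if the add was
 * performed, 0 otherwise.  Helpers such as atomic64_inc_not_zero()
 * are typically built on top of this.
 */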
int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	int ret = 0;

	raw_spin_lock_irqsave(lock, flags);
	if (v->counter != u) {
		v->counter += a;
		ret = 1;
	}
	raw_spin_unlock_irqrestore(lock, flags);
	return ret;
}
EXPORT_SYMBOL(atomic64_add_unless);
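
/*
 * Illustrative sketch (not part of this file): on a 32-bit platform
 * that falls back to this generic implementation, a 64-bit event
 * counter could be maintained as below.  The names are hypothetical.
 *
 *	static atomic64_t nr_bytes = ATOMIC64_INIT(0);
 *
 *	static void account_bytes(long long n)
 *	{
 *		atomic64_add(n, &nr_bytes);	// serialized via lock_addr()
 *	}
 *
 *	static long long bytes_snapshot(void)
 *	{
 *		return atomic64_read(&nr_bytes);
 *	}
 */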