/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * Do not include directly; use <linux/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_64_H
#define _ASM_TILE_ATOMIC_64_H

#ifndef __ASSEMBLY__

#include <asm/barrier.h>
#include <arch/spr_def.h>

/* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */

#define atomic_set(v, i) WRITE_ONCE((v)->counter, (i))

/*
 * The smp_mb() operations throughout exist because Linux requires
 * a full memory barrier both before and after any routine that
 * updates memory and returns a value.
 */

/*
 * Note a subtlety of the memory ordering here.  We are required to
 * provide a full memory barrier before and after the operation.
 * However, we only provide an explicit mb before the operation.  After
 * the operation, we use barrier() to get a full mb for free, because:
 *
 * (1) The barrier directive to the compiler prohibits any instructions
 * being statically hoisted before the barrier;
 * (2) the microarchitecture will not issue any further instructions
 * until the fetchadd result is available for the "+ i" add instruction;
 * (3) the smp_mb before the fetchadd ensures that no other memory
 * operations are in flight at this point.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	int val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_fetchadd4((void *)&v->counter, i) + i;
	barrier();  /* equivalent to smp_mb(); see block comment above */
	return val;
}

#define ATOMIC_OPS(op)							\
static inline int atomic_fetch_##op(int i, atomic_t *v)			\
{									\
	int val;							\
	smp_mb();							\
	val = __insn_fetch##op##4((void *)&v->counter, i);		\
	smp_mb();							\
	return val;							\
}									\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__insn_fetch##op##4((void *)&v->counter, i);			\
}

ATOMIC_OPS(add)
ATOMIC_OPS(and)
ATOMIC_OPS(or)

#undef ATOMIC_OPS

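/*
 * There is no fetch-xor instruction (only the fetchadd/fetchand/
 * fetchor forms instantiated above), so xor is open-coded as a
 * compare-exchange loop: the expected value is placed in
 * SPR_CMPEXCH_VALUE and cmpexch4 is retried until no other cpu has
 * changed the counter between the read and the exchange.
 */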
static inline int atomic_fetch_xor(int i, atomic_t *v)
{
	int guess, oldval = v->counter;
	smp_mb();
	do {
		guess = oldval;
		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
		oldval = __insn_cmpexch4(&v->counter, guess ^ i);
	} while (guess != oldval);
	smp_mb();
	return oldval;
}

static inline void atomic_xor(int i, atomic_t *v)
{
	int guess, oldval = v->counter;
	do {
		guess = oldval;
		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
		oldval = __insn_cmpexch4(&v->counter, guess ^ i);
	} while (guess != oldval);
}

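/*
 * Add @a to @v, unless @v was already @u; return the old value of
 * @v in either case.  This must be built on cmpxchg() rather than
 * fetchadd, since the add is conditional on the current value.
 */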
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = cmpxchg(&v->counter, guess, guess + a);
	} while (guess != oldval);
	return oldval;
}

/* Now the true 64-bit operations. */

#define ATOMIC64_INIT(i)	{ (i) }

#define atomic64_read(v)	READ_ONCE((v)->counter)
#define atomic64_set(v, i)	WRITE_ONCE((v)->counter, (i))

static inline long atomic64_add_return(long i, atomic64_t *v)
{
	long val;  /* must be long: an int would truncate the 64-bit result */
	smp_mb();  /* barrier for proper semantics */
	val = __insn_fetchadd((void *)&v->counter, i) + i;
	barrier();  /* equivalent to smp_mb(); see atomic_add_return() */
	return val;
}

#define ATOMIC64_OPS(op)						\
static inline long atomic64_fetch_##op(long i, atomic64_t *v)		\
{									\
	long val;							\
	smp_mb();							\
	val = __insn_fetch##op((void *)&v->counter, i);			\
	smp_mb();							\
	return val;							\
}									\
static inline void atomic64_##op(long i, atomic64_t *v)			\
{									\
	__insn_fetch##op((void *)&v->counter, i);			\
}

ATOMIC64_OPS(add)
ATOMIC64_OPS(and)
ATOMIC64_OPS(or)

#undef ATOMIC64_OPS

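/*
 * As with atomic_fetch_xor() above, there is no fetch-xor
 * instruction, so the 64-bit xor is likewise a cmpexch loop.
 */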
static inline long atomic64_fetch_xor(long i, atomic64_t *v)
{
	long guess, oldval = v->counter;
	smp_mb();
	do {
		guess = oldval;
		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
		oldval = __insn_cmpexch(&v->counter, guess ^ i);
	} while (guess != oldval);
	smp_mb();
	return oldval;
}

static inline void atomic64_xor(long i, atomic64_t *v)
{
	long guess, oldval = v->counter;
	do {
		guess = oldval;
		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
		oldval = __insn_cmpexch(&v->counter, guess ^ i);
	} while (guess != oldval);
}

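/*
 * Add @a to @v unless @v was @u; return non-zero if the add was
 * performed.  Note the contrast with __atomic_add_unless() above,
 * which returns the old value instead.
 */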
static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = cmpxchg(&v->counter, guess, guess + a);
	} while (guess != oldval);
	return oldval != u;
}

#define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
#define atomic64_fetch_sub(i, v)	atomic64_fetch_add(-(i), (v))
#define atomic64_sub(i, v)		atomic64_add(-(i), (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i, v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_add_negative(i, v)	(atomic64_add_return((i), (v)) < 0)

#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
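/*
 * A typical (hypothetical) caller of atomic64_inc_not_zero() takes
 * a reference only while the object is still live, e.g.:
 *
 *	if (!atomic64_inc_not_zero(&obj->refcnt))
 *		return NULL;
 */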

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_64_H */