/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC64_64_H
#define _ASM_X86_ATOMIC64_64_H

#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>

/* The 64-bit atomic type */

#define ATOMIC64_INIT(i)	{ (i) }
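
/*
 * Illustrative usage (a hypothetical sketch, not part of this header;
 * "event_count" is a made-up name, and kernel code normally goes through
 * the generic atomic64_*() wrappers rather than calling the arch_
 * functions directly):
 *
 *	static atomic64_t event_count = ATOMIC64_INIT(0);
 *
 *	arch_atomic64_inc(&event_count);
 *	pr_info("events: %ld\n", arch_atomic64_read(&event_count));
 */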

/**
 * arch_atomic64_read - read atomic64 variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 * Doesn't imply a read memory barrier.
 */
static inline long arch_atomic64_read(const atomic64_t *v)
{
	return READ_ONCE((v)->counter);
}

/**
 * arch_atomic64_set - set atomic64 variable
 * @v: pointer to type atomic64_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
static inline void arch_atomic64_set(atomic64_t *v, long i)
{
	WRITE_ONCE(v->counter, i);
}
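
/*
 * Note: READ_ONCE()/WRITE_ONCE() only keep the compiler from tearing,
 * fusing or caching the access; no barrier instruction is emitted.
 * Naturally aligned 64-bit loads and stores are atomic on x86-64, so a
 * plain mov suffices here.
 */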

/**
 * arch_atomic64_add - add integer to atomic64 variable
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static __always_inline void arch_atomic64_add(long i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "addq %1,%0"
		     : "=m" (v->counter)
		     : "er" (i), "m" (v->counter));
}
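
/*
 * Note: LOCK_PREFIX expands to the "lock" instruction prefix on SMP
 * builds (and is patched out at runtime on uniprocessor systems), making
 * the read-modify-write atomic. The "er" constraint lets @i be either a
 * register or a 32-bit sign-extended immediate, matching the operands
 * addq accepts.
 */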

/**
 * arch_atomic64_sub - subtract integer from atomic64 variable
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v.
 */
static inline void arch_atomic64_sub(long i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "subq %1,%0"
		     : "=m" (v->counter)
		     : "er" (i), "m" (v->counter));
}

/**
 * arch_atomic64_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
}
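
/*
 * Note: GEN_BINARY_RMWcc() emits the locked instruction and hands the
 * named condition flag back as the boolean result: "e" tests ZF, so this
 * returns true when the subtraction yields zero. The unary variant works
 * the same way, and arch_atomic64_add_negative() below uses "s" to test
 * the sign flag instead.
 */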

/**
 * arch_atomic64_inc - increment atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1.
 */
static __always_inline void arch_atomic64_inc(atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "incq %0"
		     : "=m" (v->counter)
		     : "m" (v->counter));
}

/**
 * arch_atomic64_dec - decrement atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1.
 */
static __always_inline void arch_atomic64_dec(atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "decq %0"
		     : "=m" (v->counter)
		     : "m" (v->counter));
}

/**
 * arch_atomic64_dec_and_test - decrement and test
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
{
	GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e);
}

/**
 * arch_atomic64_inc_and_test - increment and test
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
{
	GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e);
}

/**
 * arch_atomic64_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
#define arch_atomic64_add_negative arch_atomic64_add_negative
static inline bool arch_atomic64_add_negative(long i, atomic64_t *v)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);
}

/**
 * arch_atomic64_add_return - add and return
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __always_inline long arch_atomic64_add_return(long i, atomic64_t *v)
{
	return i + xadd(&v->counter, i);
}
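
/*
 * Note: xadd() wraps the "lock xadd" instruction, which atomically adds
 * @i to the memory operand and returns the *old* value; adding @i once
 * more therefore yields the post-operation value. The fetch_add() and
 * fetch_sub() variants below return the xadd() result directly, i.e.
 * the value the counter held before the operation.
 */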

static inline long arch_atomic64_sub_return(long i, atomic64_t *v)
{
	return arch_atomic64_add_return(-i, v);
}

static inline long arch_atomic64_fetch_add(long i, atomic64_t *v)
{
	return xadd(&v->counter, i);
}

static inline long arch_atomic64_fetch_sub(long i, atomic64_t *v)
{
	return xadd(&v->counter, -i);
}

#define arch_atomic64_inc_return(v)  (arch_atomic64_add_return(1, (v)))
#define arch_atomic64_dec_return(v)  (arch_atomic64_sub_return(1, (v)))

static inline long arch_atomic64_cmpxchg(atomic64_t *v, long old, long new)
{
	return arch_cmpxchg(&v->counter, old, new);
}

#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, long new)
{
	return try_cmpxchg(&v->counter, old, new);
}
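
/*
 * Note: try_cmpxchg() wraps "lock cmpxchg". On success it returns true;
 * on failure it returns false and updates *@old with the value currently
 * in memory, which is what lets the do/while loops further down retry
 * without an explicit re-read.
 */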

static inline long arch_atomic64_xchg(atomic64_t *v, long new)
{
	return xchg(&v->counter, new);
}

/*
 * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 dec, c = arch_atomic64_read(v);

	do {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
	} while (!arch_atomic64_try_cmpxchg(v, &c, dec));

	return dec;
}

static inline void arch_atomic64_and(long i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "andq %1,%0"
			: "+m" (v->counter)
			: "er" (i)
			: "memory");
}
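
/*
 * Note: the bitwise operations use a "+m" read/write operand plus a
 * "memory" clobber, so the compiler cannot cache memory values across
 * the locked instruction.
 */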

static inline long arch_atomic64_fetch_and(long i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
	return val;
}
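
/*
 * Note: the loop body is intentionally empty; when
 * arch_atomic64_try_cmpxchg() fails it has already refreshed @val with
 * the current counter value, so "val & i" is simply recomputed in the
 * while condition. The fetch_or() and fetch_xor() variants below follow
 * the same pattern.
 */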

static inline void arch_atomic64_or(long i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "orq %1,%0"
			: "+m" (v->counter)
			: "er" (i)
			: "memory");
}

static inline long arch_atomic64_fetch_or(long i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
	return val;
}

static inline void arch_atomic64_xor(long i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "xorq %1,%0"
			: "+m" (v->counter)
			: "er" (i)
			: "memory");
}

static inline long arch_atomic64_fetch_xor(long i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
	return val;
}
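
/*
 * Illustrative use of the fetch_*() return value (a hypothetical sketch;
 * "flags" and OWNED are made-up names): claim a bit and learn whether it
 * was already set.
 *
 *	#define OWNED (1UL << 0)
 *
 *	static atomic64_t flags = ATOMIC64_INIT(0);
 *
 *	if (arch_atomic64_fetch_or(OWNED, &flags) & OWNED)
 *		// someone else got there first
 */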

#endif /* _ASM_X86_ATOMIC64_64_H */