/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)		{ (i) }

/*
 * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
 * a "bne-" instruction at the end, an isync is enough as an acquire
 * barrier on platforms without lwsync.
 */
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret  = op##_relaxed(args);		\
	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory");	\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory");	\
	op##_relaxed(args);						\
})
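
/*
 * Illustrative sketch (assumed, based on the generic linux/atomic.h
 * wrappers): the ordered variants are built from the _relaxed ones
 * via these hooks, roughly as
 *
 *	#define atomic_add_return_acquire(...)				\
 *		__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
 */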

static __inline__ int atomic_read(const atomic_t *v)
{
	int t;

	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}
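
/*
 * Note: wrapping the plain load/store in asm acts much like
 * READ_ONCE()/WRITE_ONCE(): the compiler can neither tear nor cache
 * the access.  The "%U1%X1" / "%U0%X0" modifiers let GCC pick an
 * update or indexed addressing form when that helps.
 */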

#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int a, atomic_t *v)			\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0,%3)						\
"	stwcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int a, atomic_t *v)	\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0, %3)						\
"	stwcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC_FETCH_OP_RELAXED(op, asm_op)				\
static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v)	\
{									\
	int res, t;							\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%4		# atomic_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
	PPC405_ERR77(0, %4)						\
"	stwcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)
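
/*
 * Each ATOMIC_OPS(op, asm_op) expansion generates three functions;
 * ATOMIC_OPS(add, add), for example, yields atomic_add(),
 * atomic_add_return_relaxed() and atomic_fetch_add_relaxed().  Note
 * that "subf" (subtract-from) computes %0 = %0 - %2 with this operand
 * order, which is why it implements the sub ops.
 */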

#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed

#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(and, and)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, xor)

#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed  atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP

static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
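
/*
 * "addic" records the carry it generates in XER[CA], hence the "xer"
 * clobber in the inc/dec routines; the plain add/subf used by the
 * ATOMIC_OP() variants do not touch XER (their "cc" clobber covers
 * the CR0 update from stwcx.).
 */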

static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic_inc_return_relaxed atomic_inc_return_relaxed
#define atomic_dec_return_relaxed atomic_dec_return_relaxed

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
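
/*
 * Illustrative sketch (not part of this header): a saturating
 * increment built on atomic_cmpxchg():
 *
 *	int old = atomic_read(v);
 *	while (old != INT_MAX) {
 *		int seen = atomic_cmpxchg(v, old, old + 1);
 *		if (seen == old)
 *			break;
 *		old = seen;
 *	}
 *
 * atomic_cmpxchg() returns the value it found, so looping until
 * seen == old retries after losing a race with another updater.
 */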

/**
 * atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_fetch_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
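
/*
 * The generic atomic_add_unless(v, a, u), which returns whether the
 * add happened, is built on top of this, roughly as
 *
 *	atomic_fetch_add_unless(v, a, u) != u
 */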

/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic_inc_not_zero(atomic_t *v)
{
	int t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# atomic_inc_not_zero\n\
	cmpwi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1;
}
#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
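
/*
 * Illustrative use: take a reference only while at least one is still
 * held, as in a lookup racing with the final put (obj and refcount
 * are hypothetical names):
 *
 *	if (!atomic_inc_not_zero(&obj->refcount))
 *		return NULL;
 */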

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
#define atomic_dec_if_positive atomic_dec_if_positive
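
/*
 * Illustrative use: consume one unit of a counting resource, failing
 * once it is exhausted (count is a hypothetical counter):
 *
 *	if (atomic_dec_if_positive(&count) < 0)
 *		return -EBUSY;
 */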

#ifdef __powerpc64__

#define ATOMIC64_INIT(i)	{ (i) }

static __inline__ long atomic64_read(const atomic64_t *v)
{
	long t;

	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic64_set(atomic64_t *v, long i)
{
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}
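
/*
 * The 64-bit variants below mirror the 32-bit ones, using
 * ldarx/stdcx. on a doubleword.  There is no PPC405_ERR77() workaround
 * here because the 405 is a 32-bit core and never runs this code.
 */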

#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(long a, atomic64_t *v)		\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}

#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
static inline long							\
atomic64_##op##_return_relaxed(long a, atomic64_t *v)			\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)				\
static inline long							\
atomic64_fetch_##op##_relaxed(long a, atomic64_t *v)			\
{									\
	long res, t;							\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%4		# atomic64_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
"	stdcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)

#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed

#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)

#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed  atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP

static __inline__ void atomic64_inc(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

static __inline__ void atomic64_dec(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}

#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic64_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))

/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
{
	long t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_fetch_add_unless\n\
	cmpd	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
"	stdcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless

/**
 * atomic64_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
{
	long t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_inc_not_zero\n\
	cmpdi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n\
	stdcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1 != 0;
}
#define atomic64_inc_not_zero(v) atomic64_inc_not_zero((v))

#endif /* __powerpc64__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */