/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)		{ (i) }

/*
 * Since *_return_relaxed and {cmp}xchg_relaxed end with a "bne-"
 * instruction, an isync is enough as an acquire barrier on platforms
 * without lwsync.
 */
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret  = op##_relaxed(args);		\
	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory");	\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory");	\
	op##_relaxed(args);						\
})
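
/*
 * Illustrative sketch (describing the generic layer, not code in this
 * file): <linux/atomic.h> builds the ordered variants from the _relaxed
 * ones defined below via these hooks, roughly:
 *
 *	#define atomic_add_return_acquire(...)				\
 *		__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
 *	#define atomic_add_return_release(...)				\
 *		__atomic_op_release(atomic_add_return, __VA_ARGS__)
 *
 * so only the _relaxed implementations need hand-written assembly here.
 */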

static __inline__ int atomic_read(const atomic_t *v)
{
	int t;

	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}
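
/*
 * Minimal caller-side sketch (hypothetical values):
 *
 *	atomic_t cnt = ATOMIC_INIT(0);
 *
 *	atomic_set(&cnt, 5);
 *	WARN_ON(atomic_read(&cnt) != 5);
 *
 * Using a single lwz/stw in inline asm guarantees the access is not
 * torn or elided by the compiler; the "%U1%X1"/"%U0%X0" modifiers let
 * GCC pick update- or indexed-form addressing where profitable.
 */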

#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int a, atomic_t *v)			\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0,%3)						\
"	stwcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}

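/*
 * For reference, ATOMIC_OP(add, add) expands (ignoring the PPC405
 * erratum workaround) to the classic larx/stcx. retry loop:
 *
 *	static __inline__ void atomic_add(int a, atomic_t *v)
 *	{
 *		int t;
 *
 *		__asm__ __volatile__(
 *	"1:	lwarx	%0,0,%3\n"
 *	"	add	%0,%2,%0\n"
 *	"	stwcx.	%0,0,%3\n"
 *	"	bne-	1b\n"
 *		: "=&r" (t), "+m" (v->counter)
 *		: "r" (a), "r" (&v->counter)
 *		: "cc");
 *	}
 *
 * lwarx sets a reservation on the counter's cache line; stwcx. only
 * succeeds if the reservation still holds, otherwise "bne- 1b" retries.
 */
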
#define ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int a, atomic_t *v)	\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0, %3)						\
"	stwcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC_FETCH_OP_RELAXED(op, asm_op)				\
static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v)	\
{									\
	int res, t;							\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%4		# atomic_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
	PPC405_ERR77(0, %4)						\
"	stwcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)

#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed

#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(and, and)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, xor)

#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed  atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
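
/*
 * Caller-side sketch (hypothetical flag word): the fetch variants
 * return the value observed *before* the operation, e.g. to test a
 * flag while setting it:
 *
 *	static atomic_t flags = ATOMIC_INIT(0);
 *
 *	bool was_set = atomic_fetch_or_relaxed(0x1, &flags) & 0x1;
 *
 * Being _relaxed, this implies no ordering against other accesses.
 */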

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic_inc_return_relaxed atomic_inc_return_relaxed
#define atomic_dec_return_relaxed atomic_dec_return_relaxed

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
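
/*
 * Caller-side sketch (hypothetical helper): atomic_cmpxchg() is the
 * usual building block for compound read-modify-write operations that
 * have no dedicated primitive:
 *
 *	static int atomic_add_clamped(atomic_t *v, int a, int max)
 *	{
 *		int old, new;
 *
 *		do {
 *			old = atomic_read(v);
 *			new = old + a > max ? max : old + a;
 *		} while (atomic_cmpxchg(v, old, new) != old);
 *
 *		return old;
 *	}
 */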

/**
 * atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_fetch_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
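
/*
 * Caller-side sketch (hypothetical helper): the classic use is a
 * reference count that must not be revived once it has hit zero:
 *
 *	static bool get_ref(atomic_t *refs)
 *	{
 *		return atomic_fetch_add_unless(refs, 1, 0) != 0;
 *	}
 */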

/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic_inc_not_zero(atomic_t *v)
{
	int t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# atomic_inc_not_zero\n\
	cmpwi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1;
}
#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
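
/*
 * Illustrative RCU-style lookup (hypothetical "obj" and "table"):
 *
 *	obj = rcu_dereference(table[i]);
 *	if (obj && !atomic_inc_not_zero(&obj->refs))
 *		obj = NULL;
 *
 * A NULL result means we lost the race with the final reference drop,
 * so the object must be treated as already gone.
 */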

#define atomic_sub_and_test(a, v)	(atomic_sub_return((a), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
#define atomic_dec_if_positive atomic_dec_if_positive
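
/*
 * Caller-side sketch (hypothetical helper): a trylock-style down on a
 * counting semaphore:
 *
 *	static bool sem_trydown(atomic_t *count)
 *	{
 *		return atomic_dec_if_positive(count) >= 0;
 *	}
 *
 * Per the "old value minus 1" convention above, a negative result
 * means the counter was not decremented.
 */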

#ifdef __powerpc64__

#define ATOMIC64_INIT(i)	{ (i) }

static __inline__ long atomic64_read(const atomic64_t *v)
{
	long t;

	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic64_set(atomic64_t *v, long i)
{
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

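/*
 * Unlike the 32-bit ops above, the 64-bit ll/sc loops omit
 * PPC405_ERR77: erratum 77 only affects the 32-bit PPC405 core, which
 * never runs this 64-bit code.
 */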
#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(long a, atomic64_t *v)		\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}

#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
static inline long							\
atomic64_##op##_return_relaxed(long a, atomic64_t *v)			\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)				\
static inline long							\
atomic64_fetch_##op##_relaxed(long a, atomic64_t *v)			\
{									\
	long res, t;							\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%4		# atomic64_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
"	stdcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)

#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed

#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)

#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed  atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

static __inline__ void atomic64_inc(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

/*
 * atomic64_inc_and_test - increment and test
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)

static __inline__ void atomic64_dec(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed

#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}

#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic64_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if the addition was performed, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_add_unless\n\
	cmpd	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
"	stdcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t != u;
}

/**
 * atomic64_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
{
	long t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_inc_not_zero\n\
	cmpdi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n\
	stdcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1 != 0;
}
#define atomic64_inc_not_zero(v) atomic64_inc_not_zero((v))

#endif /* __powerpc64__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */