/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)		{ (i) }

/*
 * Because *_return_relaxed and {cmp}xchg_relaxed are implemented with
 * a "bne-" instruction at the end, an isync is enough as an acquire
 * barrier on platforms without lwsync.
 */
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret  = op##_relaxed(args);		\
	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory");	\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory");	\
	op##_relaxed(args);						\
})
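
/*
 * Sketch (assumed, mirroring include/linux/atomic.h): the generic layer
 * combines these wrappers with the _relaxed ops defined below to build
 * the acquire/release variants, e.g.
 *
 *	#define atomic_add_return_acquire(...)				\
 *		__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
 *	#define atomic_add_return_release(...)				\
 *		__atomic_op_release(atomic_add_return, __VA_ARGS__)
 */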

static __inline__ int atomic_read(const atomic_t *v)
{
	int t;

	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}
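
/*
 * atomic_read()/atomic_set() use a single volatile asm load/store rather
 * than a plain C access so the compiler cannot tear, fuse or reorder the
 * access.  The %U/%X operand modifiers let GCC use update-form (e.g. lwzu)
 * and indexed-form (e.g. lwzx) addressing for the "m" operand.
 */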

#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int a, atomic_t *v)			\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0,%3)						\
"	stwcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}

#define ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int a, atomic_t *v)	\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0, %3)						\
"	stwcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC_FETCH_OP_RELAXED(op, asm_op)				\
static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v)	\
{									\
	int res, t;							\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%4		# atomic_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
	PPC405_ERR77(0, %4)						\
"	stwcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)
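
/*
 * For reference, ATOMIC_OPS(add, add) above expands to three definitions:
 *
 *	static inline void atomic_add(int a, atomic_t *v);
 *	static inline int atomic_add_return_relaxed(int a, atomic_t *v);
 *	static inline int atomic_fetch_add_relaxed(int a, atomic_t *v);
 *
 * and likewise for "sub", with subf ("subtract from") as the asm opcode.
 */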

#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed

#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(and, and)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, xor)

#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed  atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic_inc_return_relaxed atomic_inc_return_relaxed
#define atomic_dec_return_relaxed atomic_dec_return_relaxed

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
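
/*
 * Usage sketch (hypothetical caller, not part of this header): a typical
 * atomic_cmpxchg() retry loop, here clamping a counter to a maximum.
 * Losing a race returns the freshly observed value, which seeds the next
 * attempt.
 *
 *	static inline void atomic_clamp_max(atomic_t *v, int max)
 *	{
 *		int old = atomic_read(v);
 *
 *		while (old > max) {
 *			int seen = atomic_cmpxchg(v, old, max);
 *			if (seen == old)
 *				break;
 *			old = seen;
 *		}
 *	}
 */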

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# __atomic_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
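
/*
 * Usage sketch (assumed, mirroring include/linux/atomic.h): the generic
 * helpers are built on this primitive, e.g.
 *
 *	atomic_add_unless(v, a, u)  =>  __atomic_add_unless(v, a, u) != u
 *	atomic_inc_not_zero(v)      =>  atomic_add_unless(v, 1, 0)
 *
 * (this file provides its own optimized atomic_inc_not_zero() below).
 */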

/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic_inc_not_zero(atomic_t *v)
{
	int t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# atomic_inc_not_zero\n\
	cmpwi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1;
}
#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))

#define atomic_sub_and_test(a, v)	(atomic_sub_return((a), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
#define atomic_dec_if_positive atomic_dec_if_positive
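
/*
 * Usage sketch (hypothetical caller): atomic_dec_if_positive() suits
 * semaphore-style "take one if any are left" logic, since a negative
 * return means the counter was <= 0 and was left unchanged:
 *
 *	if (atomic_dec_if_positive(&free_slots) < 0)
 *		return -EBUSY;
 */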

#ifdef __powerpc64__

#define ATOMIC64_INIT(i)	{ (i) }

static __inline__ long atomic64_read(const atomic64_t *v)
{
	long t;

	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic64_set(atomic64_t *v, long i)
{
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(long a, atomic64_t *v)		\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}

#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
static inline long							\
atomic64_##op##_return_relaxed(long a, atomic64_t *v)			\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)				\
static inline long							\
atomic64_fetch_##op##_relaxed(long a, atomic64_t *v)			\
{									\
	long res, t;							\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%4		# atomic64_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
"	stdcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)

#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed

#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)

#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed  atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

static __inline__ void atomic64_inc(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

/*
 * atomic64_inc_and_test - increment and test
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)

static __inline__ void atomic64_dec(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed

#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}

#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic64_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_add_unless\n\
	cmpd	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
"	stdcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t != u;
}

/**
 * atomic64_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
{
	long t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_inc_not_zero\n\
	cmpdi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n\
	stdcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1 != 0;
}

#endif /* __powerpc64__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */