/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#ifndef CONFIG_ARC_PLAT_EZNPS

#define atomic_read(v)  READ_ONCE((v)->counter)
#define ATOMIC_INIT(i)	{ (i) }

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int val;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
	: [ctr]	"r"	(&v->counter), /* Not "m": llock only supports reg direct addr mode */	\
	  [i]	"ir"	(i)						\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int val;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return val;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)			\
{									\
	unsigned int val, orig;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[orig], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[orig], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val),						\
	  [orig] "=&r" (orig)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return orig;							\
}

#else	/* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

 /* violating the atomic_xxx API locking protocol in UP for optimization's sake */
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence
	 *
	 * Thus atomic_set() despite being 1 insn (and seemingly atomic)
	 * requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	WRITE_ONCE(v->counter, i);
	atomic_ops_unlock(flags);
}

#endif

/*
 * Non-hardware-assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long temp;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
	unsigned long orig;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	orig = v->counter;						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
									\
	return orig;							\
}

#endif /* !CONFIG_ARC_HAS_LLSC */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
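
/*
 * Usage sketch (illustrative only, not part of this header): the three
 * flavours generated by ATOMIC_OPS() above differ only in what they return.
 * The helper name below is hypothetical and the block is never compiled.
 */
#if 0
static inline void example_op_flavours(atomic_t *v)
{
	int newval, oldval;

	atomic_add(2, v);			/* no return value, no ordering beyond atomicity */
	newval = atomic_add_return(2, v);	/* returns the updated value, fully ordered */
	oldval = atomic_fetch_add(2, v);	/* returns the value before the add, fully ordered */

	(void)newval;
	(void)oldval;
}
#endif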

#define atomic_andnot atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)

#else /* CONFIG_ARC_PLAT_EZNPS */

static inline int atomic_read(const atomic_t *v)
{
	int temp;

	__asm__ __volatile__(
	"	ld.di %0, [%1]"
	: "=r"(temp)
	: "r"(&v->counter)
	: "memory");
	return temp;
}

static inline void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__(
	"	st.di %0,[%1]"
	:
	: "r"(i), "r"(&v->counter)
	: "memory");
}

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"       .word %2\n"						\
	:								\
	: "r"(i), "r"(&v->counter), "i"(asm_op)				\
	: "r2", "r3", "memory");					\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int temp = i;						\
									\
	/* Explicit full memory barrier needed before/after */		\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"       .word %2\n"						\
	"	mov %0, r2"						\
	: "+r"(temp)							\
	: "r"(&v->counter), "i"(asm_op)					\
	: "r2", "r3", "memory");					\
									\
	smp_mb();							\
									\
	temp c_op i;							\
									\
	return temp;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)			\
{									\
	unsigned int temp = i;						\
									\
	/* Explicit full memory barrier needed before/after */		\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"       .word %2\n"						\
	"	mov %0, r2"						\
	: "+r"(temp)							\
	: "r"(&v->counter), "i"(asm_op)					\
	: "r2", "r3", "memory");					\
									\
	smp_mb();							\
									\
	return temp;							\
}

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
#define atomic_sub(i, v) atomic_add(-(i), (v))
#define atomic_sub_return(i, v) atomic_add_return(-(i), (v))

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
#define atomic_andnot(mask, v) atomic_and(~(mask), (v))
ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)

#endif /* CONFIG_ARC_PLAT_EZNPS */

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v
 */
#define __atomic_add_unless(v, a, u)					\
({									\
	int c, old;							\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	c = atomic_read(v);						\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
		c = old;						\
									\
	smp_mb();							\
									\
	c;								\
})
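
/*
 * Usage sketch (illustrative only): __atomic_add_unless() returns the old
 * value, so a caller can tell whether the add actually happened.  The
 * refcount-style helper below is hypothetical and the block is never compiled.
 */
#if 0
static inline bool example_get_ref(atomic_t *refcnt)
{
	/* take a reference only if the object is still live (count != 0) */
	return __atomic_add_unless(refcnt, 1, 0) != 0;
}
#endif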

#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)

#define atomic_inc(v)			atomic_add(1, v)
#define atomic_dec(v)			atomic_sub(1, v)

#define atomic_inc_and_test(v)		(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v)	(atomic_add_return(i, v) < 0)
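
/*
 * Usage sketch (illustrative only): the classic get/put pattern built from
 * the derived helpers above.  'example_obj' and 'example_free' are
 * hypothetical and the block is never compiled.
 */
#if 0
static inline void example_put_ref(struct example_obj *obj)
{
	/* atomic_dec_and_test() returns true only for the final reference */
	if (atomic_dec_and_test(&obj->refcnt))
		example_free(obj);
}
#endif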


#ifdef CONFIG_GENERIC_ATOMIC64

#include <asm-generic/atomic64.h>

#else	/* Kconfig ensures this is only enabled with needed h/w assist */

/*
 * ARCv2 supports 64-bit exclusive load (LLOCKD) / store (SCONDD)
 *  - The address HAS to be 64-bit aligned
 *  - There are 2 semantics involved here:
 *    = exclusive implies no interim update between load/store to same addr
 *    = both words are observed/updated together: this is guaranteed even
 *      for regular 64-bit load (LDD) / store (STD). Thus atomic64_set()
 *      is NOT required to use LLOCKD+SCONDD, STD suffices
 */

typedef struct {
	aligned_u64 counter;
} atomic64_t;

#define ATOMIC64_INIT(a) { (a) }
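
/*
 * Declaration sketch (illustrative only): aligned_u64 above is what gives
 * the counter the 64-bit alignment that LLOCKD/SCONDD (and LDD/STD) require,
 * even when atomic64_t is embedded in a struct.  'example_stats' is
 * hypothetical and the block is never compiled.
 */
#if 0
struct example_stats {
	atomic64_t bytes;	/* 8-byte aligned courtesy of aligned_u64 */
};

static struct example_stats example_stats = {
	.bytes = ATOMIC64_INIT(0),
};
#endif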

static inline long long atomic64_read(const atomic64_t *v)
{
	unsigned long long val;

	__asm__ __volatile__(
	"	ldd   %0, [%1]	\n"
	: "=r"(val)
	: "r"(&v->counter));

	return val;
}

static inline void atomic64_set(atomic64_t *v, long long a)
{
	/*
	 * This could have been a simple assignment in "C" but would need
	 * explicit volatile. Otherwise gcc optimizers could elide the store,
	 * which borked the atomic64 self-test.
	 * In the inline asm version, the memory clobber is needed for the
	 * exact same reason, to tell gcc about the store.
	 *
	 * This however is not needed for the sibling atomic64_add() etc since
	 * both the load and the store are explicitly done in inline asm. As
	 * long as the API is used for each access, gcc has no way to optimize
	 * away any load/store.
	 */
	__asm__ __volatile__(
	"	std   %0, [%1]	\n"
	:
	: "r"(a), "r"(&v->counter)
	: "memory");
}
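
/*
 * Illustrative only: the "simple assignment" variant the comment above
 * refers to.  Besides the compiler being free to elide a non-volatile
 * store, on this 32-bit ISA it could also split the assignment into two
 * word-sized stores, losing the single-STD guarantee described earlier.
 * The name is hypothetical and the block is never compiled.
 */
#if 0
static inline void example_atomic64_set_plain_c(atomic64_t *v, long long a)
{
	v->counter = a;		/* may be elided or torn - do not do this */
}
#endif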

#define ATOMIC64_OP(op, op1, op2)					\
static inline void atomic64_##op(long long a, atomic64_t *v)		\
{									\
	unsigned long long val;						\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd  %0, [%1]	\n"				\
	"	" #op1 " %L0, %L0, %L2	\n"				\
	"	" #op2 " %H0, %H0, %H2	\n"				\
	"	scondd   %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(val)							\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");						\
}									\

#define ATOMIC64_OP_RETURN(op, op1, op2)		        	\
static inline long long atomic64_##op##_return(long long a, atomic64_t *v)	\
{									\
	unsigned long long val;						\
									\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd   %0, [%1]	\n"				\
	"	" #op1 " %L0, %L0, %L2	\n"				\
	"	" #op2 " %H0, %H0, %H2	\n"				\
	"	scondd   %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: [val] "=&r"(val)						\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");	/* memory clobber comes from smp_mb() */	\
									\
	smp_mb();							\
									\
	return val;							\
}

#define ATOMIC64_FETCH_OP(op, op1, op2)		        		\
static inline long long atomic64_fetch_##op(long long a, atomic64_t *v)	\
{									\
	unsigned long long val, orig;					\
									\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd   %0, [%2]	\n"				\
	"	" #op1 " %L1, %L0, %L3	\n"				\
	"	" #op2 " %H1, %H0, %H3	\n"				\
	"	scondd   %1, [%2]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(orig), "=&r"(val)					\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");	/* memory clobber comes from smp_mb() */	\
									\
	smp_mb();							\
									\
	return orig;							\
}

#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)				\
	ATOMIC64_FETCH_OP(op, op1, op2)

#define atomic64_andnot atomic64_andnot

ATOMIC64_OPS(add, add.f, adc)
ATOMIC64_OPS(sub, sub.f, sbc)
ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, or, or)
ATOMIC64_OPS(xor, xor, xor)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline long long
atomic64_cmpxchg(atomic64_t *ptr, long long expected, long long new)
{
	long long prev;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	brne    %L0, %L2, 2f	\n"
	"	brne    %H0, %H2, 2f	\n"
	"	scondd  %3, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)
	: "r"(ptr), "ir"(expected), "r"(new)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return prev;
}

static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
{
	long long prev;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	scondd  %2, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)
	: "r"(ptr), "r"(new)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return prev;
}

/**
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */

static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long val;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	sub.f   %L0, %L0, 1	# w0 - 1, set C on borrow\n"
	"	sub.c   %H0, %H0, 1	# if C set, w1 - 1\n"
	"	brlt    %H0, 0, 2f	\n"
	"	scondd  %0, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(val)
	: "r"(&v->counter)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return val;
}
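
/*
 * Usage sketch (illustrative only): consume one unit of a 64-bit budget
 * without ever driving it negative.  The helper name is hypothetical and
 * the block is never compiled.
 */
#if 0
static inline bool example_consume_one(atomic64_t *budget)
{
	/* returns old value minus 1 even when no decrement happened */
	return atomic64_dec_if_positive(budget) >= 0;
}
#endif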

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * if (v != u) { v += a; ret = 1; } else { ret = 0; }
 * Returns 1 iff @v was not @u (i.e. if add actually happened)
 */
static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	long long val;
	int op_done;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%2]	\n"
	"	mov	%1, 1		\n"
	"	brne	%L0, %L4, 2f	# continue to add since v != u \n"
	"	breq.d	%H0, %H4, 3f	# return since v == u \n"
	"	mov	%1, 0		\n"
	"2:				\n"
	"	add.f   %L0, %L0, %L3	\n"
	"	adc     %H0, %H0, %H3	\n"
	"	scondd  %0, [%2]	\n"
	"	bnz     1b		\n"
	"3:				\n"
	: "=&r"(val), "=&r" (op_done)
	: "r"(&v->counter), "r"(a), "r"(u)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return op_done;
}
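
/*
 * Usage sketch (illustrative only): the return value says whether the add
 * happened, which is what atomic64_inc_not_zero() below relies on.  The
 * helper name is hypothetical and the block is never compiled.
 */
#if 0
static inline bool example_get_ref64(atomic64_t *refcnt)
{
	/* take a 64-bit reference only if the count is still non-zero */
	return atomic64_add_unless(refcnt, 1LL, 0LL);
}
#endif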

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

#endif	/* !CONFIG_GENERIC_ATOMIC64 */

#endif	/* !__ASSEMBLY__ */

#endif