/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	READ_ONCE((v)->counter)
#define atomic_set(v,i)	WRITE_ONCE(((v)->counter), (i))
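/*
 * Usage sketch (illustrative only; the counter name is hypothetical):
 *
 *	static atomic_t active = ATOMIC_INIT(0);
 *
 *	atomic_set(&active, 1);
 *	if (atomic_read(&active) == 1)
 *		...
 *
 * Both accessors are atomic but carry no memory ordering; callers that
 * need ordering must pair them with explicit barriers.
 */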

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic_" #op "\n"			\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic_" #op "_return\n"		\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{									\
	unsigned long tmp;						\
	int result, val;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic_fetch_" #op "\n"			\
"1:	ldrex	%0, [%4]\n"						\
"	" #asm_op "	%1, %0, %5\n"					\
"	strex	%2, %1, [%4]\n"						\
"	teq	%2, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed

#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_andnot_relaxed	atomic_fetch_andnot_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed

static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
	int oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	return oldval;
}
#define atomic_cmpxchg_relaxed		atomic_cmpxchg_relaxed
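/*
 * Usage sketch: the conventional compare-and-swap retry loop built on
 * the relaxed primitive above, shown here as a hypothetical saturating
 * increment:
 *
 *	int old = atomic_read(v);
 *
 *	while (old != INT_MAX) {
 *		int prev = atomic_cmpxchg_relaxed(v, old, old + 1);
 *
 *		if (prev == old)
 *			break;		(store succeeded)
 *		old = prev;		(lost a race; retry with fresh value)
 *	}
 */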

static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__ ("@ atomic_add_unless\n"
"1:	ldrex	%0, [%4]\n"
"	teq	%0, %5\n"
"	beq	2f\n"
"	add	%1, %0, %6\n"
"	strex	%2, %1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#define atomic_fetch_add_unless		atomic_fetch_add_unless
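/*
 * Usage sketch (hypothetical object layout): the classic "take a
 * reference unless the object is already dead" idiom:
 *
 *	if (atomic_fetch_add_unless(&obj->refs, 1, 0) == 0)
 *		return NULL;	(refcount had hit zero; obj is going away)
 *
 * The return value is the counter as it was before the add, so
 * comparing it with the excluded value u tells the caller whether the
 * increment actually happened.
 */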

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

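/*
 * Pre-ARMv6 cores lack ldrex/strex, so the fallbacks below make each
 * read-modify-write atomic the only way available on a uniprocessor:
 * by masking interrupts around a plain C update.
 */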
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	val = v->counter;						\
	raw_local_irq_restore(flags);					\
									\
	return val;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	val = v->counter;						\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
									\
	return val;							\
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

#endif /* __LINUX_ARM_ARCH__ */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define atomic_andnot atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or,  |=, orr)
ATOMIC_OPS(xor, ^=, eor)
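/*
 * Illustrative expansion: ATOMIC_OPS(add, +=, add) above generates
 *
 *	static inline void atomic_add(int i, atomic_t *v);
 *	static inline int atomic_add_return_relaxed(int i, atomic_t *v);
 *	static inline int atomic_fetch_add_relaxed(int i, atomic_t *v);
 *
 * (on pre-ARMv6 the return/fetch variants are generated without the
 * _relaxed suffix).  The bitwise ops get no *_return variant because
 * ATOMIC_OPS was redefined without ATOMIC_OP_RETURN before they were
 * instantiated.
 */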

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	long long counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

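/*
 * With LPAE the architecture guarantees that doubleword ldrd/strd
 * accesses to a naturally aligned address are single-copy atomic, so
 * atomic64_read() and atomic64_set() can use plain doubleword loads and
 * stores; without LPAE they must use ldrexd/strexd instead.
 */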
#ifdef CONFIG_ARM_LPAE
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	__asm__ __volatile__("@ atomic64_set\n"
"	strd	%2, %H2, [%1]"
	: "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	);
}
#else
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	long long tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
#endif

#define ATOMIC64_OP(op, op1, op2)					\
static inline void atomic64_##op(long long i, atomic64_t *v)		\
{									\
	long long result;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic64_" #op "\n"			\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
}									\

#define ATOMIC64_OP_RETURN(op, op1, op2)				\
static inline long long							\
atomic64_##op##_return_relaxed(long long i, atomic64_t *v)		\
{									\
	long long result;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic64_" #op "_return\n"		\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC64_FETCH_OP(op, op1, op2)					\
static inline long long							\
atomic64_fetch_##op##_relaxed(long long i, atomic64_t *v)		\
{									\
	long long result, val;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic64_fetch_" #op "\n"		\
"1:	ldrexd	%0, %H0, [%4]\n"					\
"	" #op1 " %Q1, %Q0, %Q5\n"					\
"	" #op2 " %R1, %R0, %R5\n"					\
"	strexd	%2, %1, %H1, [%4]\n"					\
"	teq	%2, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)				\
	ATOMIC64_FETCH_OP(op, op1, op2)

ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)

#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_FETCH_OP(op, op1, op2)

#define atomic64_andnot atomic64_andnot

ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or,  orr, orr)
ATOMIC64_OPS(xor, eor, eor)

#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
#define atomic64_fetch_andnot_relaxed	atomic64_fetch_andnot_relaxed
#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline long long
atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
{
	long long oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%3]\n"
		"mov		%0, #0\n"
		"teq		%1, %4\n"
		"teqeq		%H1, %H4\n"
		"strexdeq	%0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	return oldval;
}
#define atomic64_cmpxchg_relaxed	atomic64_cmpxchg_relaxed

static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
{
	long long result;
	unsigned long tmp;

	prefetchw(&ptr->counter);

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	return result;
}
#define atomic64_xchg_relaxed		atomic64_xchg_relaxed

static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%Q0, %Q0, #1\n"
"	sbc	%R0, %R0, #0\n"
"	teq	%R0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive
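/*
 * Usage sketch (hypothetical): consume one unit from a 64-bit budget,
 * failing once it is exhausted.  The return value is the decremented
 * counter, so a negative result means nothing was stored:
 *
 *	if (atomic64_dec_if_positive(&budget) < 0)
 *		return -EBUSY;
 */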

static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
						  long long u)
{
	long long oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	beq	2f\n"
"	adds	%Q1, %Q0, %Q6\n"
"	adc	%R1, %R0, %R6\n"
"	strexd	%2, %1, %H1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_ATOMIC_H */