/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999, 2000, 06 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

#include <linux/compiler.h>

#include <asm/barrier.h>
#include <asm/processor.h>
#include <asm/compiler.h>
#include <asm/war.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.	 There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks
 *
 * (the type definitions are in asm/spinlock_types.h)
 */


/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourself to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 */

39
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
40
{
D
David Daney 已提交
41
	u32 counters = ACCESS_ONCE(lock->lock);
42

D
David Daney 已提交
43
	return ((counters >> 16) ^ counters) & 0xffff;
44 45
}

46 47 48 49 50
static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.h.serving_now == lock.h.ticket;
}

51
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	u16 owner = READ_ONCE(lock->h.serving_now);
	smp_rmb();
	for (;;) {
		arch_spinlock_t tmp = READ_ONCE(*lock);

		if (tmp.h.serving_now == tmp.h.ticket ||
		    tmp.h.serving_now != owner)
			break;

		cpu_relax();
	}
	smp_acquire__after_ctrl_dep();
}
68

69
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
70
{
D
David Daney 已提交
71
	u32 counters = ACCESS_ONCE(lock->lock);
72

D
David Daney 已提交
73
	return (((counters >> 16) - counters) & 0xffff) > 1;
74
}
75
#define arch_spin_is_contended	arch_spin_is_contended
76

77
static inline void arch_spin_lock(arch_spinlock_t *lock)
L
Linus Torvalds 已提交
78
{
79 80
	int my_ticket;
	int tmp;
D
David Daney 已提交
81
	int inc = 0x10000;
L
Linus Torvalds 已提交
82 83

	if (R10000_LLSC_WAR) {
84
		__asm__ __volatile__ (
85
		"	.set push		# arch_spin_lock	\n"
86 87 88
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
D
David Daney 已提交
89
		"	addu	%[my_ticket], %[ticket], %[inc]		\n"
90 91
		"	sc	%[my_ticket], %[ticket_ptr]		\n"
		"	beqzl	%[my_ticket], 1b			\n"
L
Linus Torvalds 已提交
92
		"	 nop						\n"
D
David Daney 已提交
93 94
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[ticket], %[ticket], 0xffff		\n"
95 96 97 98
		"	bne	%[ticket], %[my_ticket], 4f		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"2:							\n"
		"	.subsection 2					\n"
D
David Daney 已提交
99
		"4:	andi	%[ticket], %[ticket], 0xffff		\n"
100
		"	sll	%[ticket], 5				\n"
101 102 103 104
		"							\n"
		"6:	bnez	%[ticket], 6b				\n"
		"	 subu	%[ticket], 1				\n"
		"							\n"
D
David Daney 已提交
105
		"	lhu	%[ticket], %[serving_now_ptr]		\n"
106 107
		"	beq	%[ticket], %[my_ticket], 2b		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
108
		"	b	4b					\n"
109 110 111
		"	 subu	%[ticket], %[ticket], 1			\n"
		"	.previous					\n"
		"	.set pop					\n"
112
		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
D
David Daney 已提交
113
		  [serving_now_ptr] "+m" (lock->h.serving_now),
114
		  [ticket] "=&r" (tmp),
D
David Daney 已提交
115 116
		  [my_ticket] "=&r" (my_ticket)
		: [inc] "r" (inc));
L
Linus Torvalds 已提交
117
	} else {
118
		__asm__ __volatile__ (
119
		"	.set push		# arch_spin_lock	\n"
120 121
		"	.set noreorder					\n"
		"							\n"
D
David Daney 已提交
122 123
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	addu	%[my_ticket], %[ticket], %[inc]		\n"
124
		"	sc	%[my_ticket], %[ticket_ptr]		\n"
D
David Daney 已提交
125 126 127
		"	beqz	%[my_ticket], 1b			\n"
		"	 srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[ticket], %[ticket], 0xffff		\n"
128 129
		"	bne	%[ticket], %[my_ticket], 4f		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
P
Paul Burton 已提交
130
		"2:	.insn						\n"
131
		"	.subsection 2					\n"
132
		"4:	andi	%[ticket], %[ticket], 0xffff		\n"
133
		"	sll	%[ticket], 5				\n"
134 135 136 137
		"							\n"
		"6:	bnez	%[ticket], 6b				\n"
		"	 subu	%[ticket], 1				\n"
		"							\n"
D
David Daney 已提交
138
		"	lhu	%[ticket], %[serving_now_ptr]		\n"
139 140
		"	beq	%[ticket], %[my_ticket], 2b		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
141
		"	b	4b					\n"
142
		"	 subu	%[ticket], %[ticket], 1			\n"
143
		"	.previous					\n"
144
		"	.set pop					\n"
145
		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
D
David Daney 已提交
146
		  [serving_now_ptr] "+m" (lock->h.serving_now),
147
		  [ticket] "=&r" (tmp),
D
David Daney 已提交
148 149
		  [my_ticket] "=&r" (my_ticket)
		: [inc] "r" (inc));
L
Linus Torvalds 已提交
150
	}
151

152
	smp_llsc_mb();
L
Linus Torvalds 已提交
153 154
}

155
static inline void arch_spin_unlock(arch_spinlock_t *lock)
L
Linus Torvalds 已提交
156
{
D
David Daney 已提交
157 158 159 160
	unsigned int serving_now = lock->h.serving_now + 1;
	wmb();
	lock->h.serving_now = (u16)serving_now;
	nudge_writes();
L
Linus Torvalds 已提交
161 162
}

163
static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
L
Linus Torvalds 已提交
164
{
165
	int tmp, tmp2, tmp3;
D
David Daney 已提交
166
	int inc = 0x10000;
L
Linus Torvalds 已提交
167 168

	if (R10000_LLSC_WAR) {
169
		__asm__ __volatile__ (
170
		"	.set push		# arch_spin_trylock	\n"
171 172 173
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
D
David Daney 已提交
174 175
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[now_serving], %[ticket], 0xffff	\n"
176
		"	bne	%[my_ticket], %[now_serving], 3f	\n"
D
David Daney 已提交
177
		"	 addu	%[ticket], %[ticket], %[inc]		\n"
178 179 180 181 182 183 184 185 186
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqzl	%[ticket], 1b				\n"
		"	 li	%[ticket], 1				\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	2b					\n"
		"	 li	%[ticket], 0				\n"
		"	.previous					\n"
		"	.set pop					\n"
187
		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
188 189
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
D
David Daney 已提交
190 191
		  [now_serving] "=&r" (tmp3)
		: [inc] "r" (inc));
L
Linus Torvalds 已提交
192
	} else {
193
		__asm__ __volatile__ (
194
		"	.set push		# arch_spin_trylock	\n"
195 196
		"	.set noreorder					\n"
		"							\n"
D
David Daney 已提交
197 198 199
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[now_serving], %[ticket], 0xffff	\n"
200
		"	bne	%[my_ticket], %[now_serving], 3f	\n"
D
David Daney 已提交
201
		"	 addu	%[ticket], %[ticket], %[inc]		\n"
202
		"	sc	%[ticket], %[ticket_ptr]		\n"
D
David Daney 已提交
203
		"	beqz	%[ticket], 1b				\n"
204
		"	 li	%[ticket], 1				\n"
P
Paul Burton 已提交
205
		"2:	.insn						\n"
206
		"	.subsection 2					\n"
207 208
		"3:	b	2b					\n"
		"	 li	%[ticket], 0				\n"
209
		"	.previous					\n"
210
		"	.set pop					\n"
211
		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
212 213
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
D
David Daney 已提交
214 215
		  [now_serving] "=&r" (tmp3)
		: [inc] "r" (inc));
L
Linus Torvalds 已提交
216 217
	}

218
	smp_llsc_mb();
219

220
	return tmp;
L
Linus Torvalds 已提交
221 222 223 224 225 226 227 228 229 230 231
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get a irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

232 233 234 235
/*
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
236
#define arch_read_can_lock(rw)	((rw)->lock >= 0)
237 238 239 240 241

/*
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
R
Ralf Baechle 已提交
242
#define arch_write_can_lock(rw) (!(rw)->lock)
243

244
static inline void arch_read_lock(arch_rwlock_t *rw)
L
Linus Torvalds 已提交
245 246 247 248 249
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
250
		"	.set	noreorder	# arch_read_lock	\n"
L
Linus Torvalds 已提交
251 252 253 254 255 256 257
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 1b					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
258 259
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
		: GCC_OFF_SMALL_ASM() (rw->lock)
L
Linus Torvalds 已提交
260 261
		: "memory");
	} else {
262 263 264 265 266 267
		do {
			__asm__ __volatile__(
			"1:	ll	%1, %2	# arch_read_lock	\n"
			"	bltz	%1, 1b				\n"
			"	 addu	%1, 1				\n"
			"2:	sc	%1, %0				\n"
268 269
			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
			: GCC_OFF_SMALL_ASM() (rw->lock)
270 271
			: "memory");
		} while (unlikely(!tmp));
L
Linus Torvalds 已提交
272
	}
273

274
	smp_llsc_mb();
L
Linus Torvalds 已提交
275 276
}

277
static inline void arch_read_unlock(arch_rwlock_t *rw)
L
Linus Torvalds 已提交
278 279 280
{
	unsigned int tmp;

281
	smp_mb__before_llsc();
282

L
Linus Torvalds 已提交
283 284
	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
285
		"1:	ll	%1, %2		# arch_read_unlock	\n"
286
		"	addiu	%1, -1					\n"
L
Linus Torvalds 已提交
287 288
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
289 290
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
		: GCC_OFF_SMALL_ASM() (rw->lock)
L
Linus Torvalds 已提交
291 292
		: "memory");
	} else {
293 294 295
		do {
			__asm__ __volatile__(
			"1:	ll	%1, %2	# arch_read_unlock	\n"
296
			"	addiu	%1, -1				\n"
297
			"	sc	%1, %0				\n"
298 299
			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
			: GCC_OFF_SMALL_ASM() (rw->lock)
300 301
			: "memory");
		} while (unlikely(!tmp));
L
Linus Torvalds 已提交
302 303 304
	}
}

305
static inline void arch_write_lock(arch_rwlock_t *rw)
L
Linus Torvalds 已提交
306 307 308 309 310
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
311
		"	.set	noreorder	# arch_write_lock	\n"
L
Linus Torvalds 已提交
312 313 314 315 316
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
317
		"	 nop						\n"
L
Linus Torvalds 已提交
318
		"	.set	reorder					\n"
319 320
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
		: GCC_OFF_SMALL_ASM() (rw->lock)
L
Linus Torvalds 已提交
321 322
		: "memory");
	} else {
323 324 325 326 327 328
		do {
			__asm__ __volatile__(
			"1:	ll	%1, %2	# arch_write_lock	\n"
			"	bnez	%1, 1b				\n"
			"	 lui	%1, 0x8000			\n"
			"2:	sc	%1, %0				\n"
329 330
			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
			: GCC_OFF_SMALL_ASM() (rw->lock)
331 332
			: "memory");
		} while (unlikely(!tmp));
L
Linus Torvalds 已提交
333
	}
334

335
	smp_llsc_mb();
L
Linus Torvalds 已提交
336 337
}

338
static inline void arch_write_unlock(arch_rwlock_t *rw)
L
Linus Torvalds 已提交
339
{
340
	smp_mb__before_llsc();
341

L
Linus Torvalds 已提交
342
	__asm__ __volatile__(
343
	"				# arch_write_unlock	\n"
L
Linus Torvalds 已提交
344 345 346 347 348 349
	"	sw	$0, %0					\n"
	: "=m" (rw->lock)
	: "m" (rw->lock)
	: "memory");
}

350
static inline int arch_read_trylock(arch_rwlock_t *rw)
351 352 353 354 355 356
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
357
		"	.set	noreorder	# arch_read_trylock	\n"
358 359
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
360
		"	bltz	%1, 2f					\n"
361 362 363
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	.set	reorder					\n"
364 365
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
366
		__WEAK_LLSC_MB
367 368
		"	li	%2, 1					\n"
		"2:							\n"
369 370
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
		: GCC_OFF_SMALL_ASM() (rw->lock)
371 372 373
		: "memory");
	} else {
		__asm__ __volatile__(
374
		"	.set	noreorder	# arch_read_trylock	\n"
375 376
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
377
		"	bltz	%1, 2f					\n"
378 379 380
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
381
		"	 nop						\n"
382
		"	.set	reorder					\n"
383
		__WEAK_LLSC_MB
384
		"	li	%2, 1					\n"
P
Paul Burton 已提交
385
		"2:	.insn						\n"
386 387
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
		: GCC_OFF_SMALL_ASM() (rw->lock)
388 389 390 391 392
		: "memory");
	}

	return ret;
}
L
Linus Torvalds 已提交
393

394
static inline int arch_write_trylock(arch_rwlock_t *rw)
L
Linus Torvalds 已提交
395 396 397 398 399 400
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
401
		"	.set	noreorder	# arch_write_trylock	\n"
L
Linus Torvalds 已提交
402 403 404 405 406 407
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
408
		"	 nop						\n"
409
		__WEAK_LLSC_MB
L
Linus Torvalds 已提交
410 411 412
		"	li	%2, 1					\n"
		"	.set	reorder					\n"
		"2:							\n"
413 414
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
		: GCC_OFF_SMALL_ASM() (rw->lock)
L
Linus Torvalds 已提交
415 416
		: "memory");
	} else {
417 418 419 420 421 422 423 424
		do {
			__asm__ __volatile__(
			"	ll	%1, %3	# arch_write_trylock	\n"
			"	li	%2, 0				\n"
			"	bnez	%1, 2f				\n"
			"	lui	%1, 0x8000			\n"
			"	sc	%1, %0				\n"
			"	li	%2, 1				\n"
P
Paul Burton 已提交
425
			"2:	.insn					\n"
426
			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
427
			  "=&r" (ret)
428
			: GCC_OFF_SMALL_ASM() (rw->lock)
429 430 431 432
			: "memory");
		} while (unlikely(!tmp));

		smp_llsc_mb();
L
Linus Torvalds 已提交
433 434 435 436 437
	}

	return ret;
}

438 439
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
440

441 442 443
#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()
444

L
Linus Torvalds 已提交
445
#endif /* _ASM_SPINLOCK_H */