/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999, 2000, 06 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/war.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

#define __raw_spin_is_locked(x)       ((x)->lock != 0)
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
#define __raw_spin_unlock_wait(x) \
	do { cpu_relax(); } while ((x)->lock)

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions.  They have a cost.
 */
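/*
 * Illustrative sketch, not part of the original header: callers normally
 * reach the __raw_* primitives below through the generic spin_lock() /
 * spin_unlock() wrappers declared in <linux/spinlock.h>.  The names
 * example_lock, example_count and example_inc are hypothetical, and the
 * block is compiled out; it only shows the intended usage pattern.
 */
#if 0
static DEFINE_SPINLOCK(example_lock);		/* hypothetical lock */
static unsigned long example_count;		/* data it protects */

static void example_inc(void)
{
	spin_lock(&example_lock);		/* reaches __raw_spin_lock() on SMP */
	example_count++;
	spin_unlock(&example_lock);		/* reaches __raw_spin_unlock() */
}
#endif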

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_spin_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	 li	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=m" (lock->lock), "=&r" (tmp)
		: "m" (lock->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_spin_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 2f					\n"
		"	 li	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 2f					\n"
		"	 nop						\n"
		"	.subsection 2					\n"
		"2:	ll	%1, %2					\n"
		"	bnez	%1, 2b					\n"
		"	 li	%1, 1					\n"
		"	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (lock->lock), "=&r" (tmp)
		: "m" (lock->lock)
		: "memory");
	}

	smp_llsc_mb();
}

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	smp_mb();

	__asm__ __volatile__(
	"	.set	noreorder	# __raw_spin_unlock	\n"
	"	sw	$0, %0					\n"
	"	.set\treorder					\n"
	: "=m" (lock->lock)
	: "m" (lock->lock)
	: "memory");
}

static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
{
	unsigned int temp, res;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_spin_trylock	\n"
		"1:	ll	%0, %3					\n"
		"	ori	%2, %0, 1				\n"
		"	sc	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	 nop						\n"
		"	andi	%2, %0, 1				\n"
		"	.set	reorder"
		: "=&r" (temp), "=m" (lock->lock), "=&r" (res)
		: "m" (lock->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_spin_trylock	\n"
		"1:	ll	%0, %3					\n"
		"	ori	%2, %0, 1				\n"
		"	sc	%2, %1					\n"
		"	beqz	%2, 2f					\n"
		"	 andi	%2, %0, 1				\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	reorder"
		: "=&r" (temp), "=m" (lock->lock), "=&r" (res)
		: "m" (lock->lock)
		: "memory");
	}

	smp_llsc_mb();

	return res == 0;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
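/*
 * Illustrative sketch, not part of the original header: with readers in
 * interrupt context and writers only in process context, the writer alone
 * needs to disable interrupts, as described above.  The names
 * example_rwlock, example_state, example_read and example_write are
 * hypothetical, and the block is compiled out.
 */
#if 0
static DEFINE_RWLOCK(example_rwlock);
static int example_state;

/* Reader; may be called from an interrupt handler. */
static int example_read(void)
{
	int val;

	read_lock(&example_rwlock);
	val = example_state;
	read_unlock(&example_rwlock);

	return val;
}

/* Writer in process context must keep the interrupt readers out. */
static void example_write(int val)
{
	unsigned long flags;

	write_lock_irqsave(&example_rwlock, flags);
	example_state = val;
	write_unlock_irqrestore(&example_rwlock, flags);
}
#endif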

/*
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define __raw_read_can_lock(rw)	((rw)->lock >= 0)

/*
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define __raw_write_can_lock(rw)	(!(rw)->lock)

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 1b					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		"	.subsection 2					\n"
		"2:	ll	%1, %2					\n"
		"	bltz	%1, 2b					\n"
		"	 addu	%1, 1					\n"
		"	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}

	smp_llsc_mb();
}

/* Note the use of sub, not subu which will make the kernel die with an
   overflow exception if we ever try to unlock an rwlock that is already
   unlocked or is being held by a writer.  */
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	unsigned int tmp;

	smp_llsc_mb();

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"1:	ll	%1, %2		# __raw_read_unlock	\n"
		"	sub	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_unlock	\n"
		"1:	ll	%1, %2					\n"
		"	sub	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 2f					\n"
		"	 nop						\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 2f					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 2f					\n"
		"	 nop						\n"
		"	.subsection 2					\n"
		"2:	ll	%1, %2					\n"
		"	bnez	%1, 2b					\n"
		"	 lui	%1, 0x8000				\n"
		"	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}

	smp_llsc_mb();
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"				# __raw_write_unlock	\n"
	"	sw	$0, %0					\n"
	: "=m" (rw->lock)
	: "m" (rw->lock)
	: "memory");
}

static inline int __raw_read_trylock(raw_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	.set	reorder					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	}

	return ret;
}

static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"	.set	reorder					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 3f					\n"
		"	 li	%2, 1					\n"
		"2:							\n"
		__WEAK_LLSC_MB
		"	.subsection 2					\n"
		"3:	b	1b					\n"
		"	 li	%2, 0					\n"
		"	.previous					\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	}

	return ret;
}


#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* _ASM_SPINLOCK_H */