#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

#include <asm/processor.h>

/*
 * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
 * extensions, so when running on UP, we have to patch these instructions away.
 */
#define ALT_SMP(smp, up)					\
	"9998:	" smp "\n"					\
	"	.pushsection \".alt.smp.init\", \"a\"\n"	\
	"	.long	9998b\n"				\
	"	" up "\n"					\
	"	.popsection\n"

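/*
 * Illustration (a sketch, not part of the original source):
 * ALT_SMP("sev", "nop") expands roughly to
 *
 *	9998:	sev
 *		.pushsection ".alt.smp.init", "a"
 *		.long	9998b
 *		nop
 *		.popsection
 *
 * The SMP instruction is emitted inline, while its address and the UP
 * replacement are recorded in .alt.smp.init so that the boot-time fixup
 * code can patch the instruction on a uniprocessor system.
 */
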
#ifdef CONFIG_THUMB2_KERNEL
#define SEV		ALT_SMP("sev.w", "nop.w")
/*
 * For Thumb-2, special care is needed to ensure that the conditional WFE
 * instruction really does assemble to exactly 4 bytes (as required by
 * the SMP_ON_UP fixup code).   By itself "wfene" might cause the
 * assembler to insert an extra (16-bit) IT instruction, depending on the
 * presence or absence of neighbouring conditional instructions.
 *
 * To avoid this unpredictability, an appropriate IT is inserted explicitly:
 * the assembler won't change IT instructions which are explicitly present
 * in the input.
 */
#define WFE(cond)	ALT_SMP(		\
	"it " cond "\n\t"			\
	"wfe" cond ".n",			\
						\
	"nop.w"					\
)
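
/*
 * For example (a sketch): in the SMP case WFE("ne") above emits
 *
 *	9998:	it	ne
 *		wfene.n
 *
 * i.e. a 16-bit IT plus a 16-bit WFE, 4 bytes total, with the 32-bit
 * "nop.w" recorded as the UP replacement, so that both alternatives are
 * exactly 4 bytes, as the SMP_ON_UP fixup code requires.
 */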
#else
#define SEV		ALT_SMP("sev", "nop")
#define WFE(cond)	ALT_SMP("wfe" cond, "nop")
#endif

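/*
 * Ensure that all prior memory accesses are visible (DSB, or its CP15
 * encoding on ARMv6) and then signal an event (SEV) to wake any CPU
 * sleeping in WFE.  The unlock paths below use this to release waiters
 * spinning in arch_spin_lock(), arch_write_lock() and arch_read_lock().
 */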
static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7
	__asm__ __volatile__ (
		"dsb\n"
		SEV
	);
#else
	__asm__ __volatile__ (
		"mcr p15, 0, %0, c7, c10, 4\n"
		SEV
		: : "r" (0)
	);
#endif
}

/*
 * ARMv6 Spin-locking.
 *
 * We exclusively read the old value.  If it is zero, we may have
 * won the lock, so we try exclusively storing it.  A memory barrier
 * is required after we get a lock, and before we release it, because
 * V6 CPUs are assumed to have weakly ordered memory.
 *
 * Unlocked value: 0
 * Locked value: 1
 */
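
/*
 * C-level sketch of the locking loop below (illustrative only; the
 * hypothetical load_exclusive()/store_exclusive()/wait_for_event()
 * helpers stand in for the ldrex/strex/wfe instructions):
 *
 *	do {
 *		tmp = load_exclusive(&lock->lock);	// ldrex
 *		if (tmp != 0)
 *			wait_for_event();		// WFE("ne")
 *		else
 *			tmp = store_exclusive(&lock->lock, 1);	// strexeq, 0 on success
 *	} while (tmp != 0);
 *	smp_mb();
 */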

#define arch_spin_is_locked(x)		((x)->lock != 0)
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
	WFE("ne")
"	strexeq	%0, %2, [%1]\n"
"	teqeq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	smp_mb();
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}
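
/*
 * Illustrative caller pattern (a sketch, not from the original source):
 *
 *	if (arch_spin_trylock(&lock)) {
 *		... critical section ...
 *		arch_spin_unlock(&lock);
 *	} else {
 *		... contended: back off or take a slow path ...
 *	}
 */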

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

	__asm__ __volatile__(
"	str	%1, [%0]\n"
	:
	: "r" (&lock->lock), "r" (0)
	: "cc");

	dsb_sev();
}

/*
 * RWLOCKS
 *
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */
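
/*
 * Lock word layout, as implied by the code below:
 *
 *	bit  31     - write lock held (the 0x80000000 written by
 *		      arch_write_lock()/arch_write_trylock())
 *	bits 30..0  - count of read locks currently held
 *
 * So a free lock is 0, a negative value means a writer owns the lock,
 * and arch_read_can_lock()'s (lock < 0x80000000) test checks that
 * bit 31 is clear.
 */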

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
	WFE("ne")
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}

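/*
 * Note: the "1:" label below is vestigial; a trylock makes a single
 * attempt and never branches back.
 */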
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]\n"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");

	dsb_sev();
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)		((x)->lock == 0)

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
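/*
 * C-level sketch of arch_read_lock() below (illustrative only; the same
 * hypothetical load_exclusive()/store_exclusive()/wait_for_event()
 * helpers as above stand in for ldrex/strex/wfe):
 *
 *	for (;;) {
 *		tmp = load_exclusive(&rw->lock) + 1;	// ldrex; adds
 *		if (tmp < 0) {				// writer holds the lock
 *			wait_for_event();		// WFE("mi")
 *			continue;
 *		}
 *		if (store_exclusive(&rw->lock, tmp) == 0)	// strexpl
 *			break;
 *		// exclusive store failed: retry (rsbpls/bmi)
 *	}
 *	smp_mb();
 */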
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	WFE("mi")
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

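	/* The last reader out (count now zero) wakes any writer waiting in WFE. */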
	if (tmp == 0)
		dsb_sev();
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2 = 1;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	: "=&r" (tmp), "+r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
	return tmp2 == 0;
}

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)		((x)->lock < 0x80000000)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */