#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, the other does not.
 *
 * These are fair FIFO ticket locks.  The ticket is a single byte when
 * NR_CPUS < 256 and 16 bits wide otherwise (see TICKET_SHIFT below).
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
# define REG_PTR_MODE "k"
#else
# define LOCK_PTR_REG "D"
# define REG_PTR_MODE "q"
#endif

#if defined(CONFIG_X86_32) && \
	(defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 *
 * With fewer than 2^8 possible CPUs, we can use x86's partial registers to
 * save some instructions and make the code more elegant. There really isn't
 * much between them in performance though, especially as locks are out of line.
 */
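/*
 * As a plain-C sketch (illustrative only, not compiled; the struct, the
 * field names and the fetch_and_add() helper are hypothetical stand-ins
 * for what is really packed into lock->slock), the protocol implemented
 * by the asm below is:
 *
 *	struct ticket { unsigned char head, tail; };
 *
 *	void lock(struct ticket *t)
 *	{
 *		unsigned char me = fetch_and_add(&t->tail, 1);	// take a ticket
 *		while (t->head != me)				// wait to be served
 *			cpu_relax();
 *	}
 *
 *	void unlock(struct ticket *t)
 *	{
 *		t->head++;					// serve the next ticket
 *	}
 *
 * The real lock path folds the ticket grab and the first read of the head
 * into the single wide xadd described above, which is why the tail has to
 * live in the high half.
 */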
#if (NR_CPUS < 256)
#define TICKET_SHIFT 8

static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
	short inc = 0x0100;

	asm volatile (
		LOCK_PREFIX "xaddw %w0, %1\n"
		"1:\t"
		"cmpb %h0, %b0\n\t"
		"je 2f\n\t"
		"rep ; nop\n\t"
		"movb %1, %b0\n\t"
		/* don't need lfence here, because loads are in-order */
		"jmp 1b\n"
		"2:"
		: "+Q" (inc), "+m" (lock->slock)
		:
		: "memory", "cc");
}
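/*
 * For reference, the trylock asm below is roughly equivalent to this
 * plain-C sketch (illustrative only; the u16 view of ->slock and the
 * cmpxchg() call are stand-ins for the exact instruction sequence):
 *
 *	int trylock(u16 *slock)
 *	{
 *		u16 old = *slock;
 *		u8 head = old & 0xff, tail = old >> 8;
 *
 *		if (head != tail)				// already held
 *			return 0;
 *		return cmpxchg(slock, old, old + 0x100) == old;	// bump the tail
 *	}
 *
 * i.e. the atomic update is only attempted when the lock looks free.
 */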

static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
	int tmp, new;

	asm volatile("movzwl %2, %0\n\t"
		     "cmpb %h0,%b0\n\t"
		     "leal 0x100(%" REG_PTR_MODE "0), %1\n\t"
		     "jne 1f\n\t"
		     LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}

static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
#else
#define TICKET_SHIFT 16

static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
	int inc = 0x00010000;
	int tmp;

	asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
		     "movzwl %w0, %2\n\t"
		     "shrl $16, %0\n\t"
		     "1:\t"
		     "cmpl %0, %2\n\t"
		     "je 2f\n\t"
		     "rep ; nop\n\t"
		     "movzwl %1, %2\n\t"
		     /* don't need lfence here, because loads are in-order */
		     "jmp 1b\n"
		     "2:"
		     : "+r" (inc), "+m" (lock->slock), "=&r" (tmp)
		     :
		     : "memory", "cc");
}

static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
	int tmp;
	int new;

	asm volatile("movl %2,%0\n\t"
		     "movl %0,%1\n\t"
		     "roll $16, %0\n\t"		/* swap head and tail halves */
		     "cmpl %0,%1\n\t"		/* equal only if head == tail */
		     "leal 0x00010000(%" REG_PTR_MODE "0), %1\n\t"
		     "jne 1f\n\t"
		     LOCK_PREFIX "cmpxchgl %1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}

static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
#endif

static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	/* locked iff the head (low half) and the tail (high half) differ */
	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
}

static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	/* contended iff more than one ticket is outstanding (tail > head + 1) */
	return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
}

#ifndef CONFIG_PARAVIRT_SPINLOCKS

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return __ticket_spin_is_locked(lock);
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	return __ticket_spin_is_contended(lock);
}
#define arch_spin_is_contended	arch_spin_is_contended

static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
}

static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __ticket_spin_trylock(lock);
}

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
}

static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
						  unsigned long flags)
{
	arch_spin_lock(lock);
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
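/*
 * Plain-C sketch of the fast paths implemented below (illustrative only;
 * the slow paths live in __read_lock_failed()/__write_lock_failed(), and
 * the atomic calls here stand for the LOCK-prefixed asm):
 *
 *	// lock->lock starts at RW_LOCK_BIAS: no readers, no writer
 *
 *	void read_lock(arch_rwlock_t *rw)
 *	{
 *		if (atomic_sub_return(1, (atomic_t *)rw) < 0)
 *			__read_lock_failed(rw);		// writer present, wait
 *	}
 *
 *	void write_lock(arch_rwlock_t *rw)
 *	{
 *		if (atomic_sub_return(RW_LOCK_BIAS, (atomic_t *)rw) != 0)
 *			__write_lock_failed(rw);	// readers/writer present
 *	}
 *
 * read_unlock() adds 1 back and write_unlock() adds RW_LOCK_BIAS back.
 */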

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_read_can_lock(arch_rwlock_t *lock)
{
	return (int)(lock)->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_write_can_lock(arch_rwlock_t *lock)
{
	return (lock)->lock == RW_LOCK_BIAS;
}

static inline void __raw_read_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}

static inline void __raw_write_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
}

static inline int __raw_read_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	if (atomic_dec_return(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}

static inline int __raw_write_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}

static inline void __raw_read_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
}

static inline void __raw_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "addl %1, %0"
		     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
}

#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

/* The {read|write|spin}_lock() on x86 are full memory barriers. */
static inline void smp_mb__after_lock(void) { }
#define ARCH_HAS_SMP_MB_AFTER_LOCK

#endif /* _ASM_X86_SPINLOCK_H */