#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which are currently limited to 256
 * CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
# define REG_PTR_MODE "k"
#else
# define LOCK_PTR_REG "D"
# define REG_PTR_MODE "q"
#endif

#if defined(CONFIG_X86_32) && \
	(defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 *
 * With fewer than 2^8 possible CPUs, we can use x86's partial registers to
 * save some instructions and make the code more elegant. There really isn't
 * much between them in performance though, especially as locks are out of line.
 */
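
/*
 * Illustration only, compiled out with "#if 0": a minimal C sketch of the
 * ticket scheme described above, using the GCC __sync builtin instead of
 * the hand-written xadd below.  The __ticket_sketch_* names are made up
 * for this sketch and are not used anywhere else.
 */
#if 0
struct __ticket_sketch {
	unsigned char head;	/* "now serving" number, the low part     */
	unsigned char tail;	/* next ticket to hand out, the high part */
};

static inline void __ticket_sketch_lock(struct __ticket_sketch *t)
{
	/* atomically take a ticket: fetch the old tail and bump it by one */
	unsigned char ticket = __sync_fetch_and_add(&t->tail, 1);

	/* spin until our number is the one being served */
	while (ACCESS_ONCE(t->head) != ticket)
		cpu_relax();
}

static inline void __ticket_sketch_unlock(struct __ticket_sketch *t)
{
	/* serve the next waiter; unsigned char wrap-around is fine for <= 256 CPUs */
	t->head++;
}
#endif
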
#if (NR_CPUS < 256)
#define TICKET_SHIFT 8

static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
{
	short inc = 0x0100;

	asm volatile (
		LOCK_PREFIX "xaddw %w0, %1\n"
		"1:\t"
		"cmpb %h0, %b0\n\t"
		"je 2f\n\t"
		"rep ; nop\n\t"
		"movb %1, %b0\n\t"
		/* don't need lfence here, because loads are in-order */
		"jmp 1b\n"
		"2:"
		: "+Q" (inc), "+m" (lock->slock)
		:
		: "memory", "cc");
}

static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
{
	int tmp, new;

	asm volatile("movzwl %2, %0\n\t"
		     "cmpb %h0,%b0\n\t"
		     "leal 0x100(%" REG_PTR_MODE "0), %1\n\t"
		     "jne 1f\n\t"
		     LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}

static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
#else
#define TICKET_SHIFT 16

static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
{
	int inc = 0x00010000;
	int tmp;

	asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
		     "movzwl %w0, %2\n\t"
		     "shrl $16, %0\n\t"
		     "1:\t"
		     "cmpl %0, %2\n\t"
		     "je 2f\n\t"
		     "rep ; nop\n\t"
		     "movzwl %1, %2\n\t"
		     /* don't need lfence here, because loads are in-order */
		     "jmp 1b\n"
		     "2:"
		     : "+r" (inc), "+m" (lock->slock), "=&r" (tmp)
		     :
		     : "memory", "cc");
}

static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
{
	int tmp;
	int new;

	asm volatile("movl %2,%0\n\t"
		     "movl %0,%1\n\t"
		     "roll $16, %0\n\t"
		     "cmpl %0,%1\n\t"
		     "leal 0x00010000(%" REG_PTR_MODE "0), %1\n\t"
		     "jne 1f\n\t"
		     LOCK_PREFIX "cmpxchgl %1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}

static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
#endif

static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	/* locked iff the head ("now serving") and tail ("next ticket") differ */
	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
}

static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	/* contended iff more than one ticket is outstanding (holder plus waiters) */
	return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
}

#ifndef CONFIG_PARAVIRT_SPINLOCKS

static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
	return __ticket_spin_is_locked(lock);
}

static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
{
	return __ticket_spin_is_contended(lock);
}
#define __raw_spin_is_contended	__raw_spin_is_contended

static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
}

static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	return __ticket_spin_trylock(lock);
}

static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
}

static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
						  unsigned long flags)
{
	__raw_spin_lock(lock);
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */

static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
	while (__raw_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
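
/*
 * Worked example of that counter scheme, assuming the usual RW_LOCK_BIAS
 * value of 0x01000000 from <asm/rwlock.h> (illustration only).  Each
 * reader subtracts 1, a writer subtracts the whole bias:
 *
 *	0x01000000	unlocked: no readers, no writer
 *	0x00fffffe	two readers hold the lock
 *	0x00000000	one writer holds the lock
 *	negative	a writer is present or waiting (the sign/"contended" bit)
 *
 * So the reader fast path below just decrements and tests the sign, while
 * the writer fast path subtracts RW_LOCK_BIAS and tests for zero.
 */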

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_read_can_lock(raw_rwlock_t *lock)
{
	return (int)(lock)->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_write_can_lock(raw_rwlock_t *lock)
{
	return (lock)->lock == RW_LOCK_BIAS;
}

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
}

static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	if (atomic_dec_return(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}

static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "addl %1, %0"
		     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
}

#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* _ASM_X86_SPINLOCK_H */