#ifndef _X86_SPINLOCK_H_
#define _X86_SPINLOCK_H_

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, using a byte-sized ticket when
 * NR_CPUS < 256 and a 16-bit ticket otherwise.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
#else
# define LOCK_PTR_REG "D"
#endif

#if defined(CONFIG_X86_32) && \
	(defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourself to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 *
 * With fewer than 2^8 possible CPUs, we can use x86's partial registers to
 * save some instructions and make the code more elegant. There really isn't
 * much between them in performance though, especially as locks are out of line.
 */
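
/*
 * In C terms, the protocol amounts to the sketch below (illustrative
 * only, not compiled: "head" and "tail" stand for the low and high
 * halves of ->slock, and xadd() for the atomic fetch-and-add performed
 * by the asm that follows):
 *
 *	lock:	my_ticket = xadd(&tail, 1);	// join queue, note position
 *		while (head != my_ticket)	// wait until it's our turn
 *			cpu_relax();
 *
 *	unlock:	head++;				// hand off to next waiter
 */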
#if (NR_CPUS < 256)
static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	return (((tmp >> 8) & 0xff) != (tmp & 0xff));
}

static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1;
}

static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
{
	short inc = 0x0100;

	asm volatile (
		LOCK_PREFIX "xaddw %w0, %1\n"
		"1:\t"
		"cmpb %h0, %b0\n\t"
		"je 2f\n\t"
		"rep ; nop\n\t"
		"movb %1, %b0\n\t"
		/* don't need lfence here, because loads are in-order */
		"jmp 1b\n"
		"2:"
		: "+Q" (inc), "+m" (lock->slock)
		:
		: "memory", "cc");
}

static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
{
	int tmp;
	short new;

	asm volatile("movw %2,%w0\n\t"
		     "cmpb %h0,%b0\n\t"
		     "jne 1f\n\t"
		     "movw %w0,%w1\n\t"
		     "incb %h1\n\t"
		     "lock ; cmpxchgw %w1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=Q" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}
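
/*
 * The trylock above is, in effect (a sketch only, not compiled;
 * cmpxchg() stands for the locked cmpxchgw):
 *
 *	old = lock->slock;
 *	if (old.head != old.tail)
 *		return 0;			// held, don't spin
 *	new = old;
 *	new.tail++;
 *	return cmpxchg(&lock->slock, old, new) == old;
 */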

static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
#else
static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	return (((tmp >> 16) & 0xffff) != (tmp & 0xffff));
}

static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1;
}

static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
{
	int inc = 0x00010000;
	int tmp;

	asm volatile("lock ; xaddl %0, %1\n"
		     "movzwl %w0, %2\n\t"
		     "shrl $16, %0\n\t"
		     "1:\t"
		     "cmpl %0, %2\n\t"
		     "je 2f\n\t"
		     "rep ; nop\n\t"
		     "movzwl %1, %2\n\t"
		     /* don't need lfence here, because loads are in-order */
		     "jmp 1b\n"
		     "2:"
		     : "+Q" (inc), "+m" (lock->slock), "=r" (tmp)
		     :
		     : "memory", "cc");
}

static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
{
	int tmp;
	int new;

	asm volatile("movl %2,%0\n\t"
		     "movl %0,%1\n\t"
		     "roll $16, %0\n\t"
		     "cmpl %0,%1\n\t"
		     "jne 1f\n\t"
		     "addl $0x00010000, %1\n\t"
		     "lock ; cmpxchgl %1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=r" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}

static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
#endif

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

#ifdef CONFIG_PARAVIRT
/*
 * Define virtualization-friendly old-style lock byte lock, for use in
 * pv_lock_ops if desired.
 *
 * This differs from the pre-2.6.24 spinlock by always using xchgb
 * rather than decb to take the lock; this allows it to use a
 * zero-initialized lock structure.  It also maintains a 1-byte
 * contention counter, so that we can implement
 * __byte_spin_is_contended.
 */
struct __byte_spinlock {
	s8 lock;
	s8 spinners;
};
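
/*
 * The lock loop below is roughly this C (a sketch only, not compiled;
 * xchg() stands for the atomic xchgb in the asm):
 *
 *	while (xchg(&bl->lock, 1) != 0) {	// grab failed, lock was held
 *		bl->spinners++;			// atomic: advertise contention
 *		while (bl->lock == 1)		// wait for release
 *			cpu_relax();
 *		bl->spinners--;			// atomic
 *	}
 */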

static inline int __byte_spin_is_locked(raw_spinlock_t *lock)
{
	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
	return bl->lock != 0;
}

static inline int __byte_spin_is_contended(raw_spinlock_t *lock)
{
	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
	return bl->spinners != 0;
}

static inline void __byte_spin_lock(raw_spinlock_t *lock)
{
	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
	s8 val = 1;

	asm("1: xchgb %1, %0\n"
	    "   test %1,%1\n"
	    "   jz 3f\n"
	    "   " LOCK_PREFIX "incb %2\n"
	    "2: rep;nop\n"
	    "   cmpb $1, %0\n"
	    "   je 2b\n"
	    "   " LOCK_PREFIX "decb %2\n"
	    "   jmp 1b\n"
	    "3:"
	    : "+m" (bl->lock), "+q" (val), "+m" (bl->spinners): : "memory");
}

static inline int __byte_spin_trylock(raw_spinlock_t *lock)
{
	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
	u8 old = 1;

	asm("xchgb %1,%0"
	    : "+m" (bl->lock), "+q" (old) : : "memory");

	return old == 0;
}

static inline void __byte_spin_unlock(raw_spinlock_t *lock)
{
	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
	smp_wmb();
	bl->lock = 0;
}
#else  /* !CONFIG_PARAVIRT */
static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
	return __ticket_spin_is_locked(lock);
}

static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
{
	return __ticket_spin_is_contended(lock);
}

static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
}

static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	return __ticket_spin_trylock(lock);
}

static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
}
#endif	/* CONFIG_PARAVIRT */

static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
	while (__raw_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
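
/*
 * Concretely, with RW_LOCK_BIAS = 0x01000000 (a worked sketch of the
 * counter arithmetic): readers subtract 1 each, a writer subtracts the
 * whole bias, so
 *
 *	unlocked:		0x01000000
 *	N readers:		0x01000000 - N		(still positive)
 *	writer holds it:	0x00000000
 *	writer + any waiter:	negative (sign/"contended" bit set)
 */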

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_read_can_lock(raw_rwlock_t *lock)
{
	return (int)(lock)->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_write_can_lock(raw_rwlock_t *lock)
{
	return (lock)->lock == RW_LOCK_BIAS;
}

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
}
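
/*
 * Both fast paths above are equivalent to roughly (a sketch only;
 * atomic_sub_return() stands for the locked subl):
 *
 *	read:	if (atomic_sub_return(1, count) < 0)
 *			__read_lock_failed();	// out-of-line slow path
 *	write:	if (atomic_sub_return(RW_LOCK_BIAS, count) != 0)
 *			__write_lock_failed();
 */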

static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	atomic_dec(count);
	if (atomic_read(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}

static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "addl %1, %0"
		     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
}

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* _X86_SPINLOCK_H_ */