#ifndef _X86_SPINLOCK_H_
#define _X86_SPINLOCK_H_

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which are currently limited to 256
 * CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifdef CONFIG_X86_32
typedef char _slock_t;
# define LOCK_INS_DEC "decb"
# define LOCK_INS_XCH "xchgb"
# define LOCK_INS_MOV "movb"
# define LOCK_INS_CMP "cmpb"
# define LOCK_PTR_REG "a"
#else
typedef int _slock_t;
# define LOCK_INS_DEC "decl"
# define LOCK_INS_XCH "xchgl"
# define LOCK_INS_MOV "movl"
# define LOCK_INS_CMP "cmpl"
# define LOCK_PTR_REG "D"
#endif

#if defined(CONFIG_X86_32) && \
	(defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourself to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 *
 * With fewer than 2^8 possible CPUs, we can use x86's partial registers to
 * save some instructions and make the code more elegant. There really isn't
 * much between them in performance though, especially as locks are out of line.
 */
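
/*
 * A minimal sketch of the ticket-lock acquire described above, written as
 * plain C for the 8-bit head/tail layout used when NR_CPUS < 256.  It is
 * illustrative only (the helper name is made up and the update is not
 * atomic); the real __raw_spin_lock() below folds "take a ticket and bump
 * the tail" into a single locked xadd.
 */
#if 0	/* illustration only, never compiled */
static inline void __ticket_lock_sketch(raw_spinlock_t *lock)
{
	unsigned short old = lock->slock;		/* low byte: head, high byte: tail */
	unsigned char ticket = old >> 8;		/* our ticket is the old tail */

	lock->slock = (unsigned short)(old + 0x0100);	/* bump the tail (really needs xadd) */

	while ((lock->slock & 0xff) != ticket)		/* spin until the head reaches our ticket */
		cpu_relax();
}
#endif
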
#if (NR_CPUS < 256)
static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
	int tmp = *(volatile signed int *)(&(lock)->slock);

	return (((tmp >> 8) & 0xff) != (tmp & 0xff));
}

static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
{
	int tmp = *(volatile signed int *)(&(lock)->slock);

	return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1;
}

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	short inc = 0x0100;

	__asm__ __volatile__ (
		LOCK_PREFIX "xaddw %w0, %1\n"
		"1:\t"
		"cmpb %h0, %b0\n\t"
		"je 2f\n\t"
		"rep ; nop\n\t"
		"movb %1, %b0\n\t"
		/* don't need lfence here, because loads are in-order */
		"jmp 1b\n"
		"2:"
		:"+Q" (inc), "+m" (lock->slock)
		:
		:"memory", "cc");
}

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	int tmp;
	short new;

	asm volatile(
		"movw %2,%w0\n\t"
		"cmpb %h0,%b0\n\t"
		"jne 1f\n\t"
		"movw %w0,%w1\n\t"
		"incb %h1\n\t"
		"lock ; cmpxchgw %w1,%2\n\t"
		"1:"
		"sete %b1\n\t"
		"movzbl %b1,%0\n\t"
		:"=&a" (tmp), "=Q" (new), "+m" (lock->slock)
		:
		: "memory", "cc");

	return tmp;
}
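
/*
 * A minimal sketch (illustrative only, helper name made up) of what the
 * trylock above does: take the lock only when head == tail, i.e. nobody
 * holds it and nobody is queued, by bumping the tail.  Plain C stands in
 * for the locked cmpxchg.
 */
#if 0	/* illustration only, never compiled */
static inline int __ticket_trylock_sketch(raw_spinlock_t *lock)
{
	unsigned short old = lock->slock;

	if ((old & 0xff) != (old >> 8))			/* head != tail: owned or queued */
		return 0;
	if (lock->slock == old) {			/* really a LOCK CMPXCHG on the word */
		lock->slock = (unsigned short)(old + 0x0100);	/* advance the tail */
		return 1;
	}
	return 0;
}
#endif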

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__(
		UNLOCK_LOCK_PREFIX "incb %0"
		:"+m" (lock->slock)
		:
		:"memory", "cc");
}
#else
static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
	int tmp = *(volatile signed int *)(&(lock)->slock);

	return (((tmp >> 16) & 0xffff) != (tmp & 0xffff));
}

static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
{
	int tmp = *(volatile signed int *)(&(lock)->slock);

	return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1;
}

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	int inc = 0x00010000;
	int tmp;

	__asm__ __volatile__ (
		"lock ; xaddl %0, %1\n"
		"movzwl %w0, %2\n\t"
		"shrl $16, %0\n\t"
		"1:\t"
		"cmpl %0, %2\n\t"
		"je 2f\n\t"
		"rep ; nop\n\t"
		"movzwl %1, %2\n\t"
		/* don't need lfence here, because loads are in-order */
		"jmp 1b\n"
		"2:"
		:"+Q" (inc), "+m" (lock->slock), "=r" (tmp)
		:
		:"memory", "cc");
}

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	int tmp;
	int new;

	asm volatile(
		"movl %2,%0\n\t"
		"movl %0,%1\n\t"
		"roll $16, %0\n\t"
		"cmpl %0,%1\n\t"
		"jne 1f\n\t"
		"addl $0x00010000, %1\n\t"
		"lock ; cmpxchgl %1,%2\n\t"
		"1:"
		"sete %b1\n\t"
		"movzbl %b1,%0\n\t"
		:"=&a" (tmp), "=r" (new), "+m" (lock->slock)
		:
		: "memory", "cc");

	return tmp;
}

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__(
		UNLOCK_LOCK_PREFIX "incw %0"
		:"+m" (lock->slock)
		:
		:"memory", "cc");
}
#endif

static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
	while (__raw_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
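
/*
 * A minimal sketch (illustrative only, helper name made up) of how the
 * biased counter above is read.  RW_LOCK_BIAS comes from asm/rwlock.h;
 * the real paths below do the corresponding arithmetic with locked
 * instructions and fall back to out-of-line slow paths on contention.
 */
#if 0	/* illustration only, never compiled */
static inline const char *__rwlock_state_sketch(raw_rwlock_t *rw)
{
	int v = (int)rw->lock;

	if (v == RW_LOCK_BIAS)
		return "unlocked";
	if (v > 0)
		return "read-locked by (RW_LOCK_BIAS - v) readers";
	if (v == 0)
		return "write-locked";
	return "contended: the sign bit is set";
}
#endif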

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_read_can_lock(raw_rwlock_t *lock)
{
	return (int)(lock)->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_write_can_lock(raw_rwlock_t *lock)
{
	return (lock)->lock == RW_LOCK_BIAS;
}

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
}

static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	atomic_dec(count);
	if (atomic_read(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}

static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "addl %1, %0"
		     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
}

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif