spinlock.h
#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

#include <linux/atomic.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which are currently limited to 256
 * CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
# define REG_PTR_MODE "k"
#else
# define LOCK_PTR_REG "D"
# define REG_PTR_MODE "q"
#endif

#if defined(CONFIG_X86_32) && \
	(defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 */
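
/*
 * Worked example (illustration only, assuming byte-sized tickets, i.e.
 * NR_CPUS < 256, and a zero-initialized lock):
 *
 *	start		{ head = 0, tail = 0 }	unlocked
 *	CPU0 lock	xadd returns { 0, 0 }, tail -> 1; ticket 0 == head, owner
 *	CPU1 lock	xadd returns { 0, 1 }, tail -> 2; ticket 1 != head, spins
 *	CPU0 unlock	head -> 1; CPU1's ticket comes up, it becomes the owner
 */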
static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
	register struct __raw_tickets inc = { .tail = 1 };

	inc = xadd(&lock->tickets, inc);

	for (;;) {
		if (inc.head == inc.tail)
			break;
		cpu_relax();
		inc.head = ACCESS_ONCE(lock->tickets.head);
	}
	barrier();		/* make sure nothing creeps in before the lock is taken */
}
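
/*
 * For illustration only: the same ticket scheme written with GCC's
 * __atomic builtins in place of the kernel's xadd() helper. This is a
 * userspace sketch assuming 8-bit tickets, not kernel code;
 * __builtin_ia32_pause() plays the role of cpu_relax().
 *
 *	struct tickets { unsigned char head, tail; };
 *
 *	static void ticket_lock(struct tickets *t)
 *	{
 *		unsigned char me = __atomic_fetch_add(&t->tail, 1,
 *						      __ATOMIC_ACQUIRE);
 *		while (__atomic_load_n(&t->head, __ATOMIC_ACQUIRE) != me)
 *			__builtin_ia32_pause();
 *	}
 */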

static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
	arch_spinlock_t old, new;

	old.tickets = ACCESS_ONCE(lock->tickets);
	if (old.tickets.head != old.tickets.tail)
		return 0;

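	/* Adding 1 << TICKET_SHIFT bumps only the tail, which sits in the high bits. */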
	new.head_tail = old.head_tail + (1 << TICKET_SHIFT);

	/* cmpxchg is a full barrier, so nothing can move before it */
	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
}

static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
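	/* Release: advance head so the next ticket holder stops spinning. */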
	__add(&lock->tickets.head, 1, UNLOCK_LOCK_PREFIX);
}

static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	return !!(tmp.tail ^ tmp.head);
}

static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

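	/* A head/tail distance greater than one means someone is queued behind the owner. */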
	return ((tmp.tail - tmp.head) & TICKET_MASK) > 1;
}

#ifndef CONFIG_PARAVIRT_SPINLOCKS

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return __ticket_spin_is_locked(lock);
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	return __ticket_spin_is_contended(lock);
}
#define arch_spin_is_contended	arch_spin_is_contended

static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
}

static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __ticket_spin_trylock(lock);
}

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
}

static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
						  unsigned long flags)
{
	arch_spin_lock(lock);
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
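
/*
 * Worked example (assuming the conventional RW_LOCK_BIAS of 0x00100000
 * from <asm/rwlock.h>):
 *
 *	unlocked	lock == RW_LOCK_BIAS
 *	one reader	lock == RW_LOCK_BIAS - 1	(read_lock decremented it)
 *	writer holds it	write == 0			(write_lock subtracted the bias)
 *
 * Readers succeed while the counter stays positive; a writer succeeds only
 * if it can take the counter from RW_LOCK_BIAS all the way down to zero.
 */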

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
	return lock->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
	return lock->write == WRITE_LOCK_CMP;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
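	/* Take a reader slot; "jns" falls through unless a writer drove the count negative. */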
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
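	/* Subtract the full bias; only an uncontended lock lands exactly on zero. */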
	asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (&rw->write), "i" (RW_LOCK_BIAS)
		     : "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *lock)
{
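	/* Optimistically grab a reader slot, backing out if a writer was present. */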
	READ_LOCK_ATOMIC(t) *count = (READ_LOCK_ATOMIC(t) *)lock;

	if (READ_LOCK_ATOMIC(dec_return)(count) >= 0)
		return 1;
	READ_LOCK_ATOMIC(inc)(count);
	return 0;
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
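	/* Try to claim the whole bias at once, undoing the subtraction on failure. */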
	atomic_t *count = (atomic_t *)&lock->write;

	if (atomic_sub_and_test(WRITE_LOCK_CMP, count))
		return 1;
	atomic_add(WRITE_LOCK_CMP, count);
	return 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
		     :"+m" (rw->lock) : : "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
		     : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#undef READ_LOCK_SIZE
#undef READ_LOCK_ATOMIC
#undef WRITE_LOCK_ADD
#undef WRITE_LOCK_SUB
#undef WRITE_LOCK_CMP

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

/* The {read|write|spin}_lock() on x86 are full memory barriers. */
static inline void smp_mb__after_lock(void) { }
#define ARCH_HAS_SMP_MB_AFTER_LOCK

#endif /* _ASM_X86_SPINLOCK_H */