/* rwsem.h: R/W semaphores implemented using XADD/CMPXCHG for i486+
 *
 * Written by David Howells (dhowells@redhat.com).
 *
 * Derived from asm-x86/semaphore.h
 *
 *
 * The MSW of the count is the negated number of active writers and waiting
 * lockers, and the LSW is the total number of active locks
 *
 * The lock count is initialized to 0 (no active and no waiting lockers).
 *
 * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
 * uncontended lock. This can be determined because XADD returns the old value.
 * Readers increment by 1 and see a positive value when uncontended, negative
 * if there are writers (and maybe) readers waiting (in which case it goes to
 * sleep).
 *
 * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
 * be extended to 65534 by manually checking the whole MSW rather than relying
 * on the S flag.
 *
 * The value of ACTIVE_BIAS supports up to 65535 active processes.
 *
 * This should be totally fair - if anything is waiting, a process that wants a
 * lock will go to the back of the queue. When the currently active lock is
 * released, if there's a writer at the front of the queue, then that and only
 * that will be woken up; if there's a bunch of consecutive readers at the
 * front, then they'll all be woken up, but no other readers will be.
 */

#ifndef _ASM_X86_RWSEM_H
#define _ASM_X86_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

#ifdef __KERNEL__
#include <asm/asm.h>

/*
 * The bias values and the counter type limits the number of
 * potential readers/writers to 32767 for 32 bits and 2147483647
 * for 64 bits.
 */

/* Low half of the count holds the number of active lockers. */
#ifdef CONFIG_X86_64
# define RWSEM_ACTIVE_MASK		0xffffffffL
#else
# define RWSEM_ACTIVE_MASK		0x0000ffffL
#endif

#define RWSEM_UNLOCKED_VALUE		0x00000000L
#define RWSEM_ACTIVE_BIAS		0x00000001L
/* One unit in the high (waiting/writer) half of the count. */
#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
/* A writer holds one active unit plus the waiting bias. */
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
L
Linus Torvalds 已提交
60 61 62 63 64
/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	asm volatile("# beginning down_read\n\t"
		     /* atomically bump the active count by ACTIVE_READ_BIAS */
		     LOCK_PREFIX _ASM_INC "(%1)\n\t"
		     /* adds 0x00000001 */
		     /* result non-negative: uncontended, read lock acquired */
		     "  jns        1f\n"
		     /* negative result: writer active or waiting -- slow path;
		      * the stub expects the sem pointer in %eax/%rax ("a") */
		     "  call call_rwsem_down_read_failed\n"
		     "1:\n\t"
		     "# ending down_read\n\t"
		     : "+m" (sem->count)
		     : "a" (sem)
		     : "memory", "cc");
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	long result, tmp;
	asm volatile("# beginning __down_read_trylock\n\t"
		     /* result = current count */
		     "  mov          %0,%1\n\t"
		     "1:\n\t"
		     /* tmp = count + ACTIVE_READ_BIAS (prospective new value) */
		     "  mov          %1,%2\n\t"
		     "  add          %3,%2\n\t"
		     /* new value <= 0 means a writer is active/waiting: bail */
		     "  jle	     2f\n\t"
		     /* install tmp iff count still equals result (in %eax);
		      * on failure cmpxchg reloads %eax with the fresh count */
		     LOCK_PREFIX "  cmpxchg  %2,%0\n\t"
		     "  jnz	     1b\n\t"
		     "2:\n\t"
		     "# ending __down_read_trylock\n\t"
		     : "+m" (sem->count), "=&a" (result), "=&r" (tmp)
		     : "i" (RWSEM_ACTIVE_READ_BIAS)
		     : "memory", "cc");
	/* result holds the last observed old count: non-negative iff we won */
	return result >= 0 ? 1 : 0;
}

/*
 * lock for writing
 *
 * Common body for __down_write()/__down_write_killable(); slow_path is the
 * name of the asm stub to call on contention.  Evaluates to the semaphore
 * pointer returned by the slow path (or sem itself when uncontended).
 */
#define ____down_write(sem, slow_path)			\
({							\
	long tmp;					\
	struct rw_semaphore* ret = sem;			\
	asm volatile("# beginning down_write\n\t"	\
		     LOCK_PREFIX "  xadd      %1,(%2)\n\t"	\
		     /* adds 0xffff0001, returns the old value */ \
		     "  test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" \
		     /* was the active mask 0 before? */\
		     "  jz        1f\n"			\
		     "  call " slow_path "\n"		\
		     "1:\n"				\
		     "# ending down_write"		\
		     : "+m" (sem->count), "=d" (tmp), "+a" (ret)	\
		     : "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS) \
		     : "memory", "cc");			\
	ret;						\
})

121
/* lock for writing (uninterruptible) */
static inline void __down_write(struct rw_semaphore *sem)
{
	____down_write(sem, "call_rwsem_down_write_failed");
}

/*
 * lock for writing, killable variant -- returns 0 on success, or -EINTR
 * when the killable slow path aborts (it hands back an ERR_PTR value).
 */
static inline int __down_write_killable(struct rw_semaphore *sem)
{
	struct rw_semaphore *ret;

	ret = ____down_write(sem, "call_rwsem_down_write_failed_killable");

	return IS_ERR(ret) ? -EINTR : 0;
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	long result, tmp;
	asm volatile("# beginning __down_write_trylock\n\t"
		     /* result = current count */
		     "  mov          %0,%1\n\t"
		     "1:\n\t"
		     "  test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
		     /* was the active mask 0 before? */
		     /* active lockers present: give up */
		     "  jnz          2f\n\t"
		     /* tmp = count + ACTIVE_WRITE_BIAS; try to install it */
		     "  mov          %1,%2\n\t"
		     "  add          %3,%2\n\t"
		     LOCK_PREFIX "  cmpxchg  %2,%0\n\t"
		     "  jnz	     1b\n\t"
		     "2:\n\t"
		     /* ZF set only on cmpxchg success: result = 1/0 */
		     "  sete         %b1\n\t"
		     "  movzbl       %b1, %k1\n\t"
		     "# ending __down_write_trylock\n\t"
		     : "+m" (sem->count), "=&a" (result), "=&r" (tmp)
		     : "er" (RWSEM_ACTIVE_WRITE_BIAS)
		     : "memory", "cc");
	return result;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	long tmp;
	asm volatile("# beginning __up_read\n\t"
		     LOCK_PREFIX "  xadd      %1,(%2)\n\t"
		     /* subtracts 1, returns the old value */
		     /* old value non-negative: no waiters to wake */
		     "  jns        1f\n\t"
		     "  call call_rwsem_wake\n" /* expects old value in %edx */
		     "1:\n"
		     "# ending __up_read\n"
		     : "+m" (sem->count), "=d" (tmp)
		     : "a" (sem), "1" (-RWSEM_ACTIVE_READ_BIAS)
		     : "memory", "cc");
}

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	long tmp;
	asm volatile("# beginning __up_write\n\t"
		     LOCK_PREFIX "  xadd      %1,(%2)\n\t"
		     /* subtracts 0xffff0001, returns the old value */
		     /* old value non-negative: nobody queued behind us */
		     "  jns        1f\n\t"
		     "  call call_rwsem_wake\n" /* expects old value in %edx */
		     "1:\n\t"
		     "# ending __up_write\n"
		     : "+m" (sem->count), "=d" (tmp)
		     : "a" (sem), "1" (-RWSEM_ACTIVE_WRITE_BIAS)
		     : "memory", "cc");
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	asm volatile("# beginning __downgrade_write\n\t"
		     /* cancel the waiting bias, keeping one active (read) unit */
		     LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
		     /*
		      * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
		      *     0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
		      */
		     /* result still negative means readers are queued: wake them */
		     "  jns       1f\n\t"
		     "  call call_rwsem_downgrade_wake\n"
		     "1:\n\t"
		     "# ending __downgrade_write\n"
		     : "+m" (sem->count)
		     : "a" (sem), "er" (-RWSEM_WAITING_BIAS)
		     : "memory", "cc");
}

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
	/* single locked add; no flags or old value needed */
	asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
		     : "+m" (sem->count)
		     : "er" (delta));
}

/*
 * implement exchange and add functionality
 */
229
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
L
Linus Torvalds 已提交
230
{
231
	return delta + xadd(&sem->count, delta);
L
Linus Torvalds 已提交
232 233 234
}

#endif /* __KERNEL__ */
#endif /* _ASM_X86_RWSEM_H */