/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/export.h>
/*
 * Guide to the rw_semaphore's count field for common values.
 * (32-bit case illustrated, similar for 64-bit)
 *
 * 0x0000000X	(1) X readers active or attempting lock, no writer waiting
 *		    X = #active_readers + #readers attempting to lock
 *		    (X*ACTIVE_BIAS)
 *
 * 0x00000000	rwsem is unlocked, and no one is waiting for the lock or
 *		attempting to read lock or write lock.
 *
 * 0xffff000X	(1) X readers active or attempting lock, with waiters for lock
 *		    X = #active readers + # readers attempting lock
 *		    (X*ACTIVE_BIAS + WAITING_BIAS)
 *		(2) 1 writer attempting lock, no waiters for lock
 *		    X-1 = #active readers + #readers attempting lock
 *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *		(3) 1 writer active, no waiters for lock
 *		    X-1 = #active readers + #readers attempting lock
 *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *
 * 0xffff0001	(1) 1 reader active or attempting lock, waiters for lock
 *		    (WAITING_BIAS + ACTIVE_BIAS)
 *		(2) 1 writer active or attempting lock, no waiters for lock
 *		    (ACTIVE_WRITE_BIAS)
 *
 * 0xffff0000	(1) There are writers or readers queued but none active
 *		    or in the process of attempting lock.
 *		    (WAITING_BIAS)
 *		Note: writer can attempt to steal lock for this count by adding
 *		ACTIVE_WRITE_BIAS in cmpxchg and checking the old count
 *
 * 0xfffe0001	(1) 1 writer active, or attempting lock. Waiters on queue.
 *		    (ACTIVE_WRITE_BIAS + WAITING_BIAS)
 *
 * Note: Readers attempt to lock by adding ACTIVE_BIAS in down_read and checking
 *	 the count becomes more than 0 for successful lock acquisition,
 *	 i.e. the case where there are only readers or nobody has lock.
 *	 (1st and 2nd case above).
 *
 *	 Writers attempt to lock by adding ACTIVE_WRITE_BIAS in down_write and
 *	 checking the count becomes ACTIVE_WRITE_BIAS for successful lock
 *	 acquisition (i.e. nobody else has lock or attempts lock).  If
 *	 unsuccessful, in rwsem_down_write_failed, we'll check to see if there
 *	 are only waiters but none active (5th case above), and attempt to
 *	 steal the lock.
 *
 */

/*
 * Initialize an rwsem:
 * @sem:  semaphore to initialize (count, wait_lock and wait_list)
 * @name: lock name, used by lockdep when CONFIG_DEBUG_LOCK_ALLOC is set
 * @key:  lockdep class key for this semaphore
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->count = RWSEM_UNLOCKED_VALUE;
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}

EXPORT_SYMBOL(__init_rwsem);

/* What a queued waiter is blocked waiting for. */
enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};

L
Linus Torvalds 已提交
88 89 90
struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
91
	enum rwsem_waiter_type type;
L
Linus Torvalds 已提交
92 93
};

/* How aggressively __rwsem_do_wake() may wake queued waiters. */
enum rwsem_wake_type {
	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
	RWSEM_WAKE_READERS,	/* Wake readers only */
	RWSEM_WAKE_READ_OWNED	/* Waker thread holds the read lock */
};

L
Linus Torvalds 已提交
100 101 102 103 104
/*
 * handle the lock release when processes blocked on it that can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
105
 * - there must be someone on the queue
L
Linus Torvalds 已提交
106 107 108 109
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if downgrading is false
 */
110
static struct rw_semaphore *
111
__rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type)
L
Linus Torvalds 已提交
112 113 114 115
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	struct list_head *next;
116
	long oldcount, woken, loop, adjustment;
L
Linus Torvalds 已提交
117

118
	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
119
	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
120
		if (wake_type == RWSEM_WAKE_ANY)
121 122 123 124 125 126
			/* Wake writer at the front of the queue, but do not
			 * grant it the lock yet as we want other writers
			 * to be able to steal it.  Readers, on the other hand,
			 * will block as they will notice the queued writer.
			 */
			wake_up_process(waiter->task);
127
		goto out;
128
	}
L
Linus Torvalds 已提交
129

130 131 132
	/* Writers might steal the lock before we grant it to the next reader.
	 * We prefer to do the first reader grant before counting readers
	 * so we can bail out early if a writer stole the lock.
133
	 */
134 135 136 137 138 139 140 141 142 143 144 145 146 147
	adjustment = 0;
	if (wake_type != RWSEM_WAKE_READ_OWNED) {
		adjustment = RWSEM_ACTIVE_READ_BIAS;
 try_reader_grant:
		oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
		if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
			/* A writer stole the lock. Undo our reader grant. */
			if (rwsem_atomic_update(-adjustment, sem) &
						RWSEM_ACTIVE_MASK)
				goto out;
			/* Last active locker left. Retry waking readers. */
			goto try_reader_grant;
		}
	}
L
Linus Torvalds 已提交
148

149 150 151
	/* Grant an infinite number of read locks to the readers at the front
	 * of the queue.  Note we increment the 'active part' of the count by
	 * the number of readers before waking any processes up.
L
Linus Torvalds 已提交
152 153 154 155 156 157 158 159 160 161 162
	 */
	woken = 0;
	do {
		woken++;

		if (waiter->list.next == &sem->wait_list)
			break;

		waiter = list_entry(waiter->list.next,
					struct rwsem_waiter, list);

163
	} while (waiter->type != RWSEM_WAITING_FOR_WRITE);
L
Linus Torvalds 已提交
164

165
	adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
166
	if (waiter->type != RWSEM_WAITING_FOR_WRITE)
167 168
		/* hit end of list above */
		adjustment -= RWSEM_WAITING_BIAS;
L
Linus Torvalds 已提交
169

170 171
	if (adjustment)
		rwsem_atomic_add(adjustment, sem);
L
Linus Torvalds 已提交
172 173

	next = sem->wait_list.next;
174 175
	loop = woken;
	do {
L
Linus Torvalds 已提交
176 177 178
		waiter = list_entry(next, struct rwsem_waiter, list);
		next = waiter->list.next;
		tsk = waiter->task;
179
		smp_mb();
L
Linus Torvalds 已提交
180 181 182
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
183
	} while (--loop);
L
Linus Torvalds 已提交
184 185 186 187 188 189

	sem->wait_list.next = next;
	next->prev = &sem->wait_list;

 out:
	return sem;
190 191
}

L
Linus Torvalds 已提交
192
/*
193
 * wait for the read lock to be granted
L
Linus Torvalds 已提交
194
 */
195
__visible
196
struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
L
Linus Torvalds 已提交
197
{
198
	long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
199
	struct rwsem_waiter waiter;
L
Linus Torvalds 已提交
200 201 202
	struct task_struct *tsk = current;

	/* set up my own style of waitqueue */
203
	waiter.task = tsk;
204
	waiter.type = RWSEM_WAITING_FOR_READ;
L
Linus Torvalds 已提交
205 206
	get_task_struct(tsk);

207
	raw_spin_lock_irq(&sem->wait_lock);
208 209
	if (list_empty(&sem->wait_list))
		adjustment += RWSEM_WAITING_BIAS;
210
	list_add_tail(&waiter.list, &sem->wait_list);
L
Linus Torvalds 已提交
211

212
	/* we're now waiting on the lock, but no longer actively locking */
L
Linus Torvalds 已提交
213 214
	count = rwsem_atomic_update(adjustment, sem);

215 216 217 218 219 220 221 222
	/* If there are no active locks, wake the front queued process(es).
	 *
	 * If there are no writers and we are first in the queue,
	 * wake our own waiter to join the existing active readers !
	 */
	if (count == RWSEM_WAITING_BIAS ||
	    (count > RWSEM_WAITING_BIAS &&
	     adjustment != -RWSEM_ACTIVE_READ_BIAS))
223
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);
L
Linus Torvalds 已提交
224

225
	raw_spin_unlock_irq(&sem->wait_lock);
L
Linus Torvalds 已提交
226 227

	/* wait to be given the lock */
228 229
	while (true) {
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
230
		if (!waiter.task)
L
Linus Torvalds 已提交
231 232 233 234 235 236 237 238 239 240
			break;
		schedule();
	}

	tsk->state = TASK_RUNNING;

	return sem;
}

/*
241
 * wait until we successfully acquire the write lock
L
Linus Torvalds 已提交
242
 */
243
__visible
244
struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
L
Linus Torvalds 已提交
245
{
246
	long count, adjustment = -RWSEM_ACTIVE_WRITE_BIAS;
247 248 249 250 251
	struct rwsem_waiter waiter;
	struct task_struct *tsk = current;

	/* set up my own style of waitqueue */
	waiter.task = tsk;
252
	waiter.type = RWSEM_WAITING_FOR_WRITE;
253 254 255 256 257 258 259 260 261

	raw_spin_lock_irq(&sem->wait_lock);
	if (list_empty(&sem->wait_list))
		adjustment += RWSEM_WAITING_BIAS;
	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	count = rwsem_atomic_update(adjustment, sem);

262 263 264 265 266
	/* If there were already threads queued before us and there are no
	 * active writers, the lock must be read owned; so we try to wake
	 * any read locks that were queued ahead of us. */
	if (count > RWSEM_WAITING_BIAS &&
	    adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
267
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READERS);
268

269
	/* wait until we successfully acquire the lock */
270
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
271
	while (true) {
272 273 274 275 276
		if (!(count & RWSEM_ACTIVE_MASK)) {
			/* Try acquiring the write lock. */
			count = RWSEM_ACTIVE_WRITE_BIAS;
			if (!list_is_singular(&sem->wait_list))
				count += RWSEM_WAITING_BIAS;
277 278 279

			if (sem->count == RWSEM_WAITING_BIAS &&
			    cmpxchg(&sem->count, RWSEM_WAITING_BIAS, count) ==
280
							RWSEM_WAITING_BIAS)
281 282
				break;
		}
283 284

		raw_spin_unlock_irq(&sem->wait_lock);
285 286 287 288 289

		/* Block until there are no active lockers. */
		do {
			schedule();
			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
290
		} while ((count = sem->count) & RWSEM_ACTIVE_MASK);
291

292
		raw_spin_lock_irq(&sem->wait_lock);
293 294
	}

295 296
	list_del(&waiter.list);
	raw_spin_unlock_irq(&sem->wait_lock);
297 298 299
	tsk->state = TASK_RUNNING;

	return sem;
L
Linus Torvalds 已提交
300 301 302 303 304 305
}

/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
306
__visible
307
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
L
Linus Torvalds 已提交
308 309 310
{
	unsigned long flags;

311
	raw_spin_lock_irqsave(&sem->wait_lock, flags);
L
Linus Torvalds 已提交
312 313 314

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
315
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);
L
Linus Torvalds 已提交
316

317
	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
L
Linus Torvalds 已提交
318 319 320 321 322 323 324 325 326

	return sem;
}

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
327
__visible
328
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
L
Linus Torvalds 已提交
329 330 331
{
	unsigned long flags;

332
	raw_spin_lock_irqsave(&sem->wait_lock, flags);
L
Linus Torvalds 已提交
333 334 335

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
336
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
L
Linus Torvalds 已提交
337

338
	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
L
Linus Torvalds 已提交
339 340 341 342 343 344 345 346

	return sem;
}

/* Slow-path entry points called from the rwsem fast paths and modules. */
EXPORT_SYMBOL(rwsem_down_read_failed);
EXPORT_SYMBOL(rwsem_down_write_failed);
EXPORT_SYMBOL(rwsem_wake);
EXPORT_SYMBOL(rwsem_downgrade_wake);