/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 *
 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
 * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/osq_lock.h>

#include "rwsem.h"

/*
 * Guide to the rw_semaphore's count field for common values.
 * (32-bit case illustrated, similar for 64-bit)
 *
 * 0x0000000X	(1) X readers active or attempting lock, no writer waiting
 *		    X = #active_readers + #readers attempting to lock
 *		    (X*ACTIVE_BIAS)
 *
 * 0x00000000	rwsem is unlocked, and no one is waiting for the lock or
 *		attempting to read lock or write lock.
 *
 * 0xffff000X	(1) X readers active or attempting lock, with waiters for lock
 *		    X = #active readers + # readers attempting lock
 *		    (X*ACTIVE_BIAS + WAITING_BIAS)
 *		(2) 1 writer attempting lock, no waiters for lock
 *		    X-1 = #active readers + #readers attempting lock
 *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *		(3) 1 writer active, no waiters for lock
 *		    X-1 = #active readers + #readers attempting lock
 *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *
 * 0xffff0001	(1) 1 reader active or attempting lock, waiters for lock
 *		    (WAITING_BIAS + ACTIVE_BIAS)
 *		(2) 1 writer active or attempting lock, no waiters for lock
 *		    (ACTIVE_WRITE_BIAS)
 *
 * 0xffff0000	(1) There are writers or readers queued but none active
 *		    or in the process of attempting lock.
 *		    (WAITING_BIAS)
 *		Note: writer can attempt to steal lock for this count by adding
 *		ACTIVE_WRITE_BIAS in cmpxchg and checking the old count
 *
 * 0xfffe0001	(1) 1 writer active, or attempting lock. Waiters on queue.
 *		    (ACTIVE_WRITE_BIAS + WAITING_BIAS)
 *
 * Note: Readers attempt to lock by adding ACTIVE_BIAS in down_read and checking
 *	 the count becomes more than 0 for successful lock acquisition,
 *	 i.e. the case where there are only readers or nobody has lock.
 *	 (1st and 2nd case above).
 *
 *	 Writers attempt to lock by adding ACTIVE_WRITE_BIAS in down_write and
 *	 checking the count becomes ACTIVE_WRITE_BIAS for successful lock
 *	 acquisition (i.e. nobody else has lock or attempts lock).  If
 *	 unsuccessful, in rwsem_down_write_failed, we'll check to see if there
 *	 are only waiters but none active (5th case above), and attempt to
 *	 steal the lock.
 *
 */
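
/*
 * Illustrative walk-through of the values above (32-bit layout), with a
 * reader followed by a contending writer:
 *
 *   unlocked:                                      count = 0x00000000
 *   A: down_read() adds ACTIVE_READ_BIAS:          count = 0x00000001
 *   B: down_write() adds ACTIVE_WRITE_BIAS:        count = 0xffff0002
 *      B fails (count != ACTIVE_WRITE_BIAS) and enters
 *      rwsem_down_write_failed(), which undoes the write bias
 *      and, as the first waiter, adds WAITING_BIAS: count = 0xffff0001
 *   A: up_read() subtracts ACTIVE_READ_BIAS:       count = 0xffff0000
 *      count is negative with no active part, so A calls
 *      rwsem_wake(); B is woken and its trylock cmpxchg()s
 *      WAITING_BIAS -> ACTIVE_WRITE_BIAS:          count = 0xffff0001
 */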

/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->count = RWSEM_UNLOCKED_VALUE;
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	sem->owner = NULL;
	osq_lock_init(&sem->osq);
#endif
}

EXPORT_SYMBOL(__init_rwsem);
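
/*
 * For reference, a sketch of typical rwsem usage that ends up in the
 * slow paths implemented below when the lock is contended (illustrative
 * only; my_sem is a made-up name):
 *
 *	static DECLARE_RWSEM(my_sem);
 *
 *	down_read(&my_sem);       - may enter rwsem_down_read_failed()
 *	... read-side critical section ...
 *	up_read(&my_sem);         - may enter rwsem_wake()
 *
 *	down_write(&my_sem);      - may enter rwsem_down_write_failed()
 *	... write-side critical section ...
 *	downgrade_write(&my_sem); - may enter rwsem_downgrade_wake()
 *	up_read(&my_sem);         - release the remaining read lock
 */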

enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	enum rwsem_waiter_type type;
};

enum rwsem_wake_type {
	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
	RWSEM_WAKE_READERS,	/* Wake readers only */
	RWSEM_WAKE_READ_OWNED	/* Waker thread holds the read lock */
};
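
/*
 * In this file, RWSEM_WAKE_ANY is used by rwsem_wake() and
 * rwsem_down_read_failed(), RWSEM_WAKE_READERS by
 * rwsem_down_write_failed(), and RWSEM_WAKE_READ_OWNED by
 * rwsem_downgrade_wake().
 */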

/*
 * handle the lock release when there are processes blocked on it that can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - there must be someone on the queue
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if downgrading is false
 */
static struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	struct list_head *next;
	long oldcount, woken, loop, adjustment;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
		if (wake_type == RWSEM_WAKE_ANY)
			/* Wake writer at the front of the queue, but do not
			 * grant it the lock yet as we want other writers
			 * to be able to steal it.  Readers, on the other hand,
			 * will block as they will notice the queued writer.
			 */
			wake_up_process(waiter->task);
		goto out;
	}

	/* Writers might steal the lock before we grant it to the next reader.
	 * We prefer to do the first reader grant before counting readers
	 * so we can bail out early if a writer stole the lock.
	 */
	adjustment = 0;
	if (wake_type != RWSEM_WAKE_READ_OWNED) {
		adjustment = RWSEM_ACTIVE_READ_BIAS;
 try_reader_grant:
		oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
		if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
			/* A writer stole the lock. Undo our reader grant. */
			if (rwsem_atomic_update(-adjustment, sem) &
						RWSEM_ACTIVE_MASK)
				goto out;
			/* Last active locker left. Retry waking readers. */
			goto try_reader_grant;
		}
	}

	/* Grant an infinite number of read locks to the readers at the front
	 * of the queue.  Note we increment the 'active part' of the count by
	 * the number of readers before waking any processes up.
	 */
	woken = 0;
	do {
		woken++;

		if (waiter->list.next == &sem->wait_list)
			break;

		waiter = list_entry(waiter->list.next,
					struct rwsem_waiter, list);

	} while (waiter->type != RWSEM_WAITING_FOR_WRITE);

	adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
	if (waiter->type != RWSEM_WAITING_FOR_WRITE)
		/* hit end of list above */
		adjustment -= RWSEM_WAITING_BIAS;

	if (adjustment)
		rwsem_atomic_add(adjustment, sem);

	next = sem->wait_list.next;
	loop = woken;
	do {
		waiter = list_entry(next, struct rwsem_waiter, list);
		next = waiter->list.next;
		tsk = waiter->task;
		/*
		 * Make sure we do not wake up the next reader before
		 * setting waiter->task to NULL (the condition the sleeping
		 * reader checks); otherwise we could miss the wakeup on
		 * the other side and end up sleeping again. See the
		 * pairing in rwsem_down_read_failed().
		 */
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
	} while (--loop);

	sem->wait_list.next = next;
	next->prev = &sem->wait_list;

 out:
	return sem;
}
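
/*
 * Worked example for the reader wakeup above (illustrative, 32-bit
 * layout): rwsem_wake() finds three readers queued with no writer
 * behind them, starting from count = 0xffff0000.  The speculative
 * grant adds ACTIVE_READ_BIAS (count = 0xffff0001), the loop counts
 * woken = 3, so adjustment = 3*ACTIVE_READ_BIAS - ACTIVE_READ_BIAS
 * - RWSEM_WAITING_BIAS = 0x00010002, and the final rwsem_atomic_add()
 * leaves count = 0x00000003: three active readers, nobody queued.
 */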

/*
 * Wait for the read lock to be granted
 */
__visible
struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
{
	long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
	struct rwsem_waiter waiter;
	struct task_struct *tsk = current;

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.type = RWSEM_WAITING_FOR_READ;
	get_task_struct(tsk);

	raw_spin_lock_irq(&sem->wait_lock);
	if (list_empty(&sem->wait_list))
		adjustment += RWSEM_WAITING_BIAS;
	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	count = rwsem_atomic_update(adjustment, sem);

	/* If there are no active locks, wake the front queued process(es).
	 *
	 * If there are no writers and we are first in the queue,
	 * wake our own waiter to join the existing active readers !
	 */
	if (count == RWSEM_WAITING_BIAS ||
	    (count > RWSEM_WAITING_BIAS &&
	     adjustment != -RWSEM_ACTIVE_READ_BIAS))
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);

	raw_spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	while (true) {
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if (!waiter.task)
			break;
		schedule();
	}

	__set_task_state(tsk, TASK_RUNNING);
	return sem;
}
EXPORT_SYMBOL(rwsem_down_read_failed);

static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
{
	/*
	 * Try acquiring the write lock. Check count first in order
	 * to reduce unnecessary expensive cmpxchg() operations.
	 */
	if (count == RWSEM_WAITING_BIAS &&
	    cmpxchg(&sem->count, RWSEM_WAITING_BIAS,
		    RWSEM_ACTIVE_WRITE_BIAS) == RWSEM_WAITING_BIAS) {
		if (!list_is_singular(&sem->wait_list))
			rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);
		rwsem_set_owner(sem);
		return true;
	}

	return false;
}
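
/*
 * Concrete values for rwsem_try_write_lock() above (illustrative,
 * 32-bit layout): a queued writer sees count == 0xffff0000
 * (RWSEM_WAITING_BIAS) once all active lockers are gone and
 * cmpxchg()s it to 0xffff0001 (RWSEM_ACTIVE_WRITE_BIAS).  If it was
 * not the only waiter, RWSEM_WAITING_BIAS is added back, giving
 * 0xfffe0001.
 */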

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
 * Try to acquire write lock before the writer has been put on wait queue.
 */
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
	long old, count = READ_ONCE(sem->count);

	while (true) {
		if (!(count == 0 || count == RWSEM_WAITING_BIAS))
			return false;

		old = cmpxchg(&sem->count, count, count + RWSEM_ACTIVE_WRITE_BIAS);
		if (old == count) {
			rwsem_set_owner(sem);
			return true;
		}

		count = old;
	}
}

static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *owner;
	bool ret = true;

	if (need_resched())
		return false;

	rcu_read_lock();
	owner = READ_ONCE(sem->owner);
	if (!owner) {
		long count = READ_ONCE(sem->count);
		/*
		 * If sem->owner is not set, yet we have just recently entered the
		 * slowpath with the lock being active, then there is a possibility
		 * reader(s) may have the lock. To be safe, bail spinning in these
		 * situations.
		 */
		if (count & RWSEM_ACTIVE_MASK)
			ret = false;
		goto done;
	}

	ret = owner->on_cpu;
done:
	rcu_read_unlock();
	return ret;
}

static noinline
bool rwsem_spin_on_owner(struct rw_semaphore *sem, struct task_struct *owner)
{
	long count;

	rcu_read_lock();
	while (sem->owner == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking that sem->owner still matches owner. If that
		 * fails, owner might point to free()d memory; if it still
		 * matches, the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		/* abort spinning when need_resched or owner is not running */
		if (!owner->on_cpu || need_resched()) {
			rcu_read_unlock();
			return false;
		}

		cpu_relax_lowlatency();
	}
	rcu_read_unlock();

	if (READ_ONCE(sem->owner))
		return true; /* new owner, continue spinning */

	/*
	 * When the owner is not set, the lock could be free or
	 * held by readers. Check the counter to verify the
	 * state.
	 */
	count = READ_ONCE(sem->count);
	return (count == 0 || count == RWSEM_WAITING_BIAS);
}

static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	struct task_struct *owner;
	bool taken = false;

	preempt_disable();

	/* sem->wait_lock should not be held when doing optimistic spinning */
	if (!rwsem_can_spin_on_owner(sem))
		goto done;

	if (!osq_lock(&sem->osq))
		goto done;

	while (true) {
		owner = READ_ONCE(sem->owner);
		if (owner && !rwsem_spin_on_owner(sem, owner))
			break;

		/* wait_lock will be acquired if write_lock is obtained */
		if (rwsem_try_write_lock_unqueued(sem)) {
			taken = true;
			break;
		}

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task that will live-lock because we won't let
		 * the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(current)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax_lowlatency();
	}
	osq_unlock(&sem->osq);
done:
	preempt_enable();
	return taken;
}

/*
 * Return true if the rwsem has an active spinner
 */
static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
	return osq_is_locked(&sem->osq);
}

#else
static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	return false;
}

static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
	return false;
}
#endif

/*
 * Wait until we successfully acquire the write lock
 */
__visible
struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
{
	long count;
	bool waiting = true; /* any queued threads before us */
	struct rwsem_waiter waiter;

	/* undo write bias from down_write operation, stop active locking */
	count = rwsem_atomic_update(-RWSEM_ACTIVE_WRITE_BIAS, sem);

	/* do optimistic spinning and steal lock if possible */
	if (rwsem_optimistic_spin(sem))
		return sem;

	/*
	 * Optimistic spinning failed, proceed to the slowpath
	 * and block until we can acquire the sem.
	 */
	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_WRITE;

	raw_spin_lock_irq(&sem->wait_lock);

	/* account for this before adding a new element to the list */
	if (list_empty(&sem->wait_list))
		waiting = false;

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	if (waiting) {
		count = READ_ONCE(sem->count);

		/*
		 * If there were already threads queued before us and there are
		 * no active writers, the lock must be read owned; so we try to
		 * wake any read locks that were queued ahead of us.
		 */
		if (count > RWSEM_WAITING_BIAS)
			sem = __rwsem_do_wake(sem, RWSEM_WAKE_READERS);

	} else
		count = rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);

	/* wait until we successfully acquire the lock */
	set_current_state(TASK_UNINTERRUPTIBLE);
	while (true) {
		if (rwsem_try_write_lock(count, sem))
			break;
		raw_spin_unlock_irq(&sem->wait_lock);

		/* Block until there are no active lockers. */
		do {
			schedule();
			set_current_state(TASK_UNINTERRUPTIBLE);
		} while ((count = sem->count) & RWSEM_ACTIVE_MASK);

		raw_spin_lock_irq(&sem->wait_lock);
	}
	__set_current_state(TASK_RUNNING);

	list_del(&waiter.list);
	raw_spin_unlock_irq(&sem->wait_lock);

	return sem;
}
EXPORT_SYMBOL(rwsem_down_write_failed);

/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
__visible
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
	unsigned long flags;

	/*
	 * If a spinner is present, it is not necessary to do the wakeup.
	 * Try to do wakeup only if the trylock succeeds to minimize
	 * spinlock contention which may introduce too much delay in the
	 * unlock operation.
	 *
	 *    spinning writer		up_write/up_read caller
	 *    ---------------		-----------------------
	 * [S]   osq_unlock()		[L]   osq
	 *	 MB			      RMB
	 * [RmW] rwsem_try_write_lock() [RmW] spin_trylock(wait_lock)
	 *
	 * Here, it is important to make sure that there won't be a missed
	 * wakeup while the rwsem is free and the only spinning writer goes
	 * to sleep without taking the rwsem. Even when the spinning writer
	 * is just going to break out of the waiting loop, it will still do
	 * a trylock in rwsem_down_write_failed() before sleeping. IOW, if
	 * rwsem_has_spinner() is true, it will guarantee at least one
	 * trylock attempt on the rwsem later on.
	 */
	if (rwsem_has_spinner(sem)) {
		/*
		 * The smp_rmb() here is to make sure that the spinner
		 * state is consulted before reading the wait_lock.
		 */
		smp_rmb();
		if (!raw_spin_trylock_irqsave(&sem->wait_lock, flags))
			return sem;
		goto locked;
	}
	raw_spin_lock_irqsave(&sem->wait_lock, flags);
locked:

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	return sem;
}
EXPORT_SYMBOL(rwsem_wake);

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
__visible
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	return sem;
}
EXPORT_SYMBOL(rwsem_downgrade_wake);
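
/*
 * Illustrative downgrade walk-through (32-bit layout, assuming the
 * usual __downgrade_write() fast path that adds -RWSEM_WAITING_BIAS):
 * a writer holding the lock with readers queued behind it sees
 * count == 0xfffe0001; adding 0x00010000 turns the active write bias
 * into a single read bias, giving 0xffff0001.  The result is still
 * negative, so rwsem_downgrade_wake() above is called and the readers
 * at the front of the queue are granted the lock as well.
 */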