// SPDX-License-Identifier: GPL-2.0
/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 *
 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
 * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
 */
#include <linux/rwsem.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/osq_lock.h>

#include "rwsem.h"

/*
 * Guide to the rw_semaphore's count field for common values.
 * (32-bit case illustrated, similar for 64-bit)
 *
 * 0x0000000X	(1) X readers active or attempting lock, no writer waiting
 *		    X = #active_readers + #readers attempting to lock
 *		    (X*ACTIVE_BIAS)
 *
 * 0x00000000	rwsem is unlocked, and no one is waiting for the lock or
 *		attempting to read lock or write lock.
 *
 * 0xffff000X	(1) X readers active or attempting lock, with waiters for lock
 *		    X = #active readers + # readers attempting lock
 *		    (X*ACTIVE_BIAS + WAITING_BIAS)
 *		(2) 1 writer attempting lock, no waiters for lock
 *		    X-1 = #active readers + #readers attempting lock
 *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *		(3) 1 writer active, no waiters for lock
 *		    X-1 = #active readers + #readers attempting lock
 *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *
 * 0xffff0001	(1) 1 reader active or attempting lock, waiters for lock
 *		    (WAITING_BIAS + ACTIVE_BIAS)
 *		(2) 1 writer active or attempting lock, no waiters for lock
 *		    (ACTIVE_WRITE_BIAS)
 *
 * 0xffff0000	(1) There are writers or readers queued but none active
 *		    or in the process of attempting lock.
 *		    (WAITING_BIAS)
 *		Note: writer can attempt to steal lock for this count by adding
 *		ACTIVE_WRITE_BIAS in cmpxchg and checking the old count
 *
 * 0xfffe0001	(1) 1 writer active, or attempting lock. Waiters on queue.
 *		    (ACTIVE_WRITE_BIAS + WAITING_BIAS)
 *
 * Note: Readers attempt to lock by adding ACTIVE_BIAS in down_read and
 *	 checking that the resulting count is greater than 0 for a successful
 *	 lock acquisition, i.e. the case where there are only readers or
 *	 nobody holds the lock (1st and 2nd case above).
 *
 *	 Writers attempt to lock by adding ACTIVE_WRITE_BIAS in down_write and
 *	 checking that the resulting count equals ACTIVE_WRITE_BIAS for a
 *	 successful lock acquisition (i.e. nobody else holds or is attempting
 *	 the lock).  If unsuccessful, in rwsem_down_write_failed(), we check
 *	 whether there are only waiters but none active (5th case above), and
 *	 attempt to steal the lock.
 *
 */
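
/*
 * Illustrative only (not from the original source): a worked example of the
 * count arithmetic above, using the 32-bit layout where ACTIVE_BIAS is
 * 0x00000001, WAITING_BIAS is 0xffff0000 and ACTIVE_WRITE_BIAS is 0xffff0001.
 *
 *   count = 0x00000000                          unlocked
 *   down_read()      +ACTIVE_BIAS            -> 0x00000001  one reader
 *   down_read()      +ACTIVE_BIAS            -> 0x00000002  two readers
 *   down_write()     +ACTIVE_WRITE_BIAS      -> 0xffff0003  negative, so the
 *                    writer takes the slowpath, undoes its bias and adds
 *                    WAITING_BIAS while queueing              0xffff0002
 *   up_read() twice  -2 * ACTIVE_BIAS        -> 0xffff0000  only waiters left;
 *                    the final up_read() sees the negative result and calls
 *                    rwsem_wake()
 *   queued writer    cmpxchg WAITING_BIAS -> ACTIVE_WRITE_BIAS (0xffff0001)
 */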

/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	sem->owner = NULL;
	osq_lock_init(&sem->osq);
#endif
}

EXPORT_SYMBOL(__init_rwsem);
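
/*
 * Usage sketch (illustrative, not part of the original file): callers do not
 * normally invoke __init_rwsem() directly; they go through the init_rwsem()
 * macro or the DECLARE_RWSEM() static initializer from <linux/rwsem.h>, e.g.
 *
 *	static DECLARE_RWSEM(example_sem);	// hypothetical semaphore
 *
 *	down_read(&example_sem);		// shared critical section
 *	up_read(&example_sem);
 *
 *	down_write(&example_sem);		// exclusive critical section
 *	up_write(&example_sem);
 */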

enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	enum rwsem_waiter_type type;
};

enum rwsem_wake_type {
	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
	RWSEM_WAKE_READERS,	/* Wake readers only */
	RWSEM_WAKE_READ_OWNED	/* Waker thread holds the read lock */
};

/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - there must be someone on the queue
 * - the wait_lock must be held by the caller
 * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
 *   to actually wake up the blocked task(s) and drop the reference count,
 *   preferably when the wait_lock is released
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only marked woken if downgrading is false
 */
static void __rwsem_mark_wake(struct rw_semaphore *sem,
			      enum rwsem_wake_type wake_type,
			      struct wake_q_head *wake_q)
{
	struct rwsem_waiter *waiter, *tmp;
	long oldcount, woken = 0, adjustment = 0;

	/*
	 * Take a peek at the queue head waiter such that we can determine
	 * the wakeup(s) to perform.
	 */
	waiter = list_first_entry(&sem->wait_list, struct rwsem_waiter, list);

	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
		if (wake_type == RWSEM_WAKE_ANY) {
			/*
			 * Mark writer at the front of the queue for wakeup.
			 * Until the task is actually awoken later by the
			 * caller, other writers are able to steal it.
			 * Readers, on the other hand, will block as they
			 * will notice the queued writer.
			 */
			wake_q_add(wake_q, waiter->task);
		}

		return;
	}

	/*
	 * Writers might steal the lock before we grant it to the next reader.
	 * We prefer to do the first reader grant before counting readers
	 * so we can bail out early if a writer stole the lock.
	 */
	if (wake_type != RWSEM_WAKE_READ_OWNED) {
		adjustment = RWSEM_ACTIVE_READ_BIAS;
 try_reader_grant:
		oldcount = atomic_long_fetch_add(adjustment, &sem->count);
		if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
			/*
			 * If the count is still less than RWSEM_WAITING_BIAS
			 * after removing the adjustment, it is assumed that
			 * a writer has stolen the lock. We have to undo our
			 * reader grant.
			 */
			if (atomic_long_add_return(-adjustment, &sem->count) <
			    RWSEM_WAITING_BIAS)
				return;

			/* Last active locker left. Retry waking readers. */
			goto try_reader_grant;
		}
		/*
		 * It is not really necessary to set it to reader-owned here,
		 * but it gives the spinners an early indication that the
		 * readers now have the lock.
		 */
		rwsem_set_reader_owned(sem);
	}

	/*
	 * Grant an infinite number of read locks to the readers at the front
	 * of the queue. We know that woken will be at least 1 as we accounted
	 * for above. Note we increment the 'active part' of the count by the
	 * number of readers before waking any processes up.
	 */
	list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
		struct task_struct *tsk;

		if (waiter->type == RWSEM_WAITING_FOR_WRITE)
			break;

		woken++;
		tsk = waiter->task;

		wake_q_add(wake_q, tsk);
		list_del(&waiter->list);
		/*
		 * Ensure that the last operation is setting the reader
		 * waiter to nil such that rwsem_down_read_failed() cannot
		 * race with do_exit() by always holding a reference count
		 * to the task to wakeup.
		 */
		smp_store_release(&waiter->task, NULL);
	}

	adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
	if (list_empty(&sem->wait_list)) {
		/* hit end of list above */
		adjustment -= RWSEM_WAITING_BIAS;
	}

	if (adjustment)
		atomic_long_add(adjustment, &sem->count);
}
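
/*
 * Illustrative only (not from the original source): the final adjustment made
 * by __rwsem_mark_wake() for a non-reader-owned wakeup. Suppose three readers
 * are queued and nothing waits behind them:
 *
 *   adjustment = RWSEM_ACTIVE_READ_BIAS          (the speculative grant above)
 *   woken      = 3
 *   adjustment = 3 * RWSEM_ACTIVE_READ_BIAS - adjustment
 *              = 2 * RWSEM_ACTIVE_READ_BIAS      (two more active readers)
 *   list now empty -> adjustment -= RWSEM_WAITING_BIAS
 *
 * so a single atomic_long_add() accounts for the two extra readers and clears
 * the waiting bias in one step.
 */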

/*
 * Wait for the read lock to be granted
 */
static inline struct rw_semaphore __sched *
__rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
{
	long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
	struct rwsem_waiter waiter;
	DEFINE_WAKE_Q(wake_q);

	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_READ;

	raw_spin_lock_irq(&sem->wait_lock);
	if (list_empty(&sem->wait_list))
		adjustment += RWSEM_WAITING_BIAS;
	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	count = atomic_long_add_return(adjustment, &sem->count);

	/*
	 * If there are no active locks, wake the front queued process(es).
	 *
	 * If there are no writers and we are first in the queue,
	 * wake our own waiter to join the existing active readers!
	 */
	if (count == RWSEM_WAITING_BIAS ||
	    (count > RWSEM_WAITING_BIAS &&
	     adjustment != -RWSEM_ACTIVE_READ_BIAS))
		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

	raw_spin_unlock_irq(&sem->wait_lock);
	wake_up_q(&wake_q);

	/* wait to be given the lock */
	while (true) {
		set_current_state(state);
		if (!waiter.task)
			break;
		if (signal_pending_state(state, current)) {
			raw_spin_lock_irq(&sem->wait_lock);
			if (waiter.task)
				goto out_nolock;
			raw_spin_unlock_irq(&sem->wait_lock);
			break;
		}
		schedule();
	}

	__set_current_state(TASK_RUNNING);
	return sem;
out_nolock:
	list_del(&waiter.list);
	if (list_empty(&sem->wait_list))
		atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
	raw_spin_unlock_irq(&sem->wait_lock);
	__set_current_state(TASK_RUNNING);
	return ERR_PTR(-EINTR);
}
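
/*
 * Illustrative only (not from the original source): how the wakeup condition
 * in the read slowpath above reads the count after this waiter is queued.
 *
 *   count == RWSEM_WAITING_BIAS
 *	no active lockers remain, only waiters: wake the front of the queue.
 *
 *   count > RWSEM_WAITING_BIAS && adjustment != -RWSEM_ACTIVE_READ_BIAS
 *	the active part is non-zero but not a write bias, so readers hold the
 *	lock, and our adjustment included RWSEM_WAITING_BIAS, i.e. we were the
 *	first waiter: wake our own waiter so it joins the existing readers.
 */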

__visible struct rw_semaphore * __sched
rwsem_down_read_failed(struct rw_semaphore *sem)
{
	return __rwsem_down_read_failed_common(sem, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(rwsem_down_read_failed);

__visible struct rw_semaphore * __sched
rwsem_down_read_failed_killable(struct rw_semaphore *sem)
{
	return __rwsem_down_read_failed_common(sem, TASK_KILLABLE);
}
EXPORT_SYMBOL(rwsem_down_read_failed_killable);

/*
 * This function must be called with the sem->wait_lock held to prevent
 * race conditions between checking the rwsem wait list and setting the
 * sem->count accordingly.
 */
static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
{
	/*
	 * Avoid trying to acquire write lock if count isn't RWSEM_WAITING_BIAS.
	 */
	if (count != RWSEM_WAITING_BIAS)
		return false;

	/*
	 * Acquire the lock by trying to set it to ACTIVE_WRITE_BIAS. If there
	 * are other tasks on the wait list, we need to add on WAITING_BIAS.
	 */
	count = list_is_singular(&sem->wait_list) ?
			RWSEM_ACTIVE_WRITE_BIAS :
			RWSEM_ACTIVE_WRITE_BIAS + RWSEM_WAITING_BIAS;

	if (atomic_long_cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS, count)
							== RWSEM_WAITING_BIAS) {
		rwsem_set_owner(sem);
		return true;
	}

	return false;
}
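
/*
 * Illustrative only (not from the original source): the two cmpxchg targets
 * used by rwsem_try_write_lock() above.
 *
 *   we are the only waiter:   WAITING_BIAS -> ACTIVE_WRITE_BIAS
 *	the waiting bias is consumed along with the grant, since the list
 *	will be empty once we dequeue ourselves.
 *
 *   others are still queued:  WAITING_BIAS -> ACTIVE_WRITE_BIAS + WAITING_BIAS
 *	the waiting bias must stay in place for the remaining waiters.
 *
 * Either exchange only succeeds while the observed count is exactly
 * RWSEM_WAITING_BIAS, i.e. no reader or writer is currently active.
 */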

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
 * Try to acquire write lock before the writer has been put on wait queue.
 */
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
	long old, count = atomic_long_read(&sem->count);

	while (true) {
		if (!(count == 0 || count == RWSEM_WAITING_BIAS))
			return false;

		old = atomic_long_cmpxchg_acquire(&sem->count, count,
				      count + RWSEM_ACTIVE_WRITE_BIAS);
		if (old == count) {
			rwsem_set_owner(sem);
			return true;
		}

		count = old;
	}
}

static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *owner;
	bool ret = true;

	if (need_resched())
		return false;

	rcu_read_lock();
	owner = READ_ONCE(sem->owner);
	if (!rwsem_owner_is_writer(owner)) {
		/*
		 * Don't spin if the rwsem is reader owned.
		 */
		ret = !rwsem_owner_is_reader(owner);
		goto done;
	}

	/*
	 * Because of the lock holder preemption problem, we also skip
	 * spinning if the owner task is not running on a CPU or its CPU
	 * is preempted.
	 */
	ret = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
done:
	rcu_read_unlock();
	return ret;
}
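
/*
 * Illustrative only (not from the original source): the three owner states
 * consulted by the helpers above (the exact encodings live in "rwsem.h" and
 * may differ by kernel version).
 *
 *   owner is a writer task     rwsem_owner_is_writer() is true: keep spinning,
 *                              watching owner->on_cpu in rwsem_spin_on_owner().
 *   owner is the reader mark   rwsem_owner_is_reader() is true: give up, since
 *                              we cannot tell whether the readers are running.
 *   owner is NULL              the lock is free or the owner field is not yet
 *                              set: keep trying the unqueued trylock.
 */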

/*
 * Return true only if we can still spin on the owner field of the rwsem.
 */
static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *owner = READ_ONCE(sem->owner);

	if (!rwsem_owner_is_writer(owner))
		goto out;

	rcu_read_lock();
	while (sem->owner == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking that sem->owner still matches owner. If that
		 * fails, owner might point to free()d memory; if it still
		 * matches, the rcu_read_lock() ensures the memory stays
		 * valid.
		 */
		barrier();

		/*
		 * abort spinning when need_resched() is set, the owner is
		 * not running, or the owner's CPU is preempted.
		 */
		if (!owner->on_cpu || need_resched() ||
				vcpu_is_preempted(task_cpu(owner))) {
			rcu_read_unlock();
			return false;
		}

		cpu_relax();
	}
	rcu_read_unlock();
out:
	/*
	 * If there is a new owner or the owner is not set, we continue
	 * spinning.
	 */
	return !rwsem_owner_is_reader(READ_ONCE(sem->owner));
}

static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	bool taken = false;

	preempt_disable();

	/* sem->wait_lock should not be held when doing optimistic spinning */
	if (!rwsem_can_spin_on_owner(sem))
		goto done;

	if (!osq_lock(&sem->osq))
		goto done;

	/*
	 * Optimistically spin on the owner field and attempt to acquire the
	 * lock whenever the owner changes. Spinning will be stopped when:
	 *  1) the owning writer isn't running; or
	 *  2) readers own the lock as we can't determine if they are
	 *     actively running or not.
	 */
	while (rwsem_spin_on_owner(sem)) {
		/*
		 * Try to acquire the lock
		 */
		if (rwsem_try_write_lock_unqueued(sem)) {
			taken = true;
			break;
		}

		/*
		 * When there's no owner, we might have been preempted between
		 * the owner acquiring the lock and setting the owner field.
		 * If we're an RT task, that will live-lock because we won't
		 * let the owner complete.
		 */
		if (!sem->owner && (need_resched() || rt_task(current)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}
	osq_unlock(&sem->osq);
done:
	preempt_enable();
	return taken;
}

/*
 * Return true if the rwsem has an active spinner
 */
static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
	return osq_is_locked(&sem->osq);
}

#else
static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	return false;
}

static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
	return false;
}
#endif

/*
 * Wait until we successfully acquire the write lock
 */
static inline struct rw_semaphore *
__rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
{
	long count;
	bool waiting = true; /* any queued threads before us */
	struct rwsem_waiter waiter;
	struct rw_semaphore *ret = sem;
	DEFINE_WAKE_Q(wake_q);

	/* undo write bias from down_write operation, stop active locking */
	count = atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS, &sem->count);

	/* do optimistic spinning and steal lock if possible */
	if (rwsem_optimistic_spin(sem))
		return sem;

	/*
	 * Optimistic spinning failed, proceed to the slowpath
	 * and block until we can acquire the sem.
	 */
	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_WRITE;

	raw_spin_lock_irq(&sem->wait_lock);

	/* account for this before adding a new element to the list */
	if (list_empty(&sem->wait_list))
		waiting = false;

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	if (waiting) {
		count = atomic_long_read(&sem->count);

		/*
		 * If there were already threads queued before us and there are
		 * no active writers, the lock must be read owned; so we try to
		 * wake any read locks that were queued ahead of us.
		 */
		if (count > RWSEM_WAITING_BIAS) {
			__rwsem_mark_wake(sem, RWSEM_WAKE_READERS, &wake_q);
			/*
			 * The wakeup is normally called _after_ the wait_lock
			 * is released, but given that we are proactively waking
			 * readers we can deal with the wake_q overhead as it is
			 * similar to releasing and taking the wait_lock again
			 * for attempting rwsem_try_write_lock().
			 */
			wake_up_q(&wake_q);

			/*
			 * Reinitialize wake_q after use.
			 */
			wake_q_init(&wake_q);
		}

	} else
		count = atomic_long_add_return(RWSEM_WAITING_BIAS, &sem->count);

	/* wait until we successfully acquire the lock */
	set_current_state(state);
	while (true) {
		if (rwsem_try_write_lock(count, sem))
			break;
		raw_spin_unlock_irq(&sem->wait_lock);

		/* Block until there are no active lockers. */
		do {
			if (signal_pending_state(state, current))
				goto out_nolock;

			schedule();
			set_current_state(state);
		} while ((count = atomic_long_read(&sem->count)) & RWSEM_ACTIVE_MASK);

		raw_spin_lock_irq(&sem->wait_lock);
	}
	__set_current_state(TASK_RUNNING);
	list_del(&waiter.list);
	raw_spin_unlock_irq(&sem->wait_lock);

	return ret;

out_nolock:
	__set_current_state(TASK_RUNNING);
	raw_spin_lock_irq(&sem->wait_lock);
	list_del(&waiter.list);
	if (list_empty(&sem->wait_list))
		atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
	else
		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
	raw_spin_unlock_irq(&sem->wait_lock);
	wake_up_q(&wake_q);

	return ERR_PTR(-EINTR);
}
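
/*
 * Illustrative only (not from the original source): the count moves made by
 * the write slowpath above, starting from one active reader (0x00000001).
 *
 *   down_write()            +ACTIVE_WRITE_BIAS -> 0xffff0002, slowpath taken
 *   slowpath entry          -ACTIVE_WRITE_BIAS -> 0x00000001
 *   queue as only waiter    +WAITING_BIAS      -> 0xffff0001
 *   reader's up_read()      -ACTIVE_BIAS       -> 0xffff0000 and rwsem_wake()
 *   rwsem_try_write_lock()  cmpxchg WAITING_BIAS -> ACTIVE_WRITE_BIAS
 *
 * The loop only retries the cmpxchg once the active mask has dropped to zero,
 * which is why it blocks until there are no active lockers.
 */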

__visible struct rw_semaphore * __sched
rwsem_down_write_failed(struct rw_semaphore *sem)
{
	return __rwsem_down_write_failed_common(sem, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(rwsem_down_write_failed);

__visible struct rw_semaphore * __sched
rwsem_down_write_failed_killable(struct rw_semaphore *sem)
{
	return __rwsem_down_write_failed_common(sem, TASK_KILLABLE);
}
EXPORT_SYMBOL(rwsem_down_write_failed_killable);

/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
__visible
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	/*
	 * __rwsem_down_write_failed_common(sem)
	 *   rwsem_optimistic_spin(sem)
	 *     osq_unlock(sem->osq)
	 *   ...
	 *   atomic_long_add_return(&sem->count)
	 *
	 *      - VS -
	 *
	 *              __up_write()
	 *                if (atomic_long_sub_return_release(&sem->count) < 0)
	 *                  rwsem_wake(sem)
	 *                    osq_is_locked(&sem->osq)
	 *
	 * And __up_write() must observe !osq_is_locked() when it observes the
	 * atomic_long_add_return() in order to not miss a wakeup.
	 *
	 * This boils down to:
	 *
	 * [S.rel] X = 1                [RmW] r0 = (Y += 0)
	 *         MB                         RMB
	 * [RmW]   Y += 1               [L]   r1 = X
	 *
	 * exists (r0=1 /\ r1=0)
	 */
	smp_rmb();

	/*
	 * If a spinner is present, it is not necessary to do the wakeup.
	 * Try to do wakeup only if the trylock succeeds to minimize
	 * spinlock contention which may introduce too much delay in the
	 * unlock operation.
	 *
	 *    spinning writer		up_write/up_read caller
	 *    ---------------		-----------------------
	 * [S]   osq_unlock()		[L]   osq
	 *	 MB			      RMB
	 * [RmW] rwsem_try_write_lock() [RmW] spin_trylock(wait_lock)
	 *
	 * Here, it is important to make sure that there won't be a missed
	 * wakeup while the rwsem is free and the only spinning writer goes
	 * to sleep without taking the rwsem. Even when the spinning writer
	 * is just going to break out of the waiting loop, it will still do
	 * a trylock in rwsem_down_write_failed() before sleeping. IOW, if
	 * rwsem_has_spinner() is true, it will guarantee at least one
	 * trylock attempt on the rwsem later on.
	 */
	if (rwsem_has_spinner(sem)) {
		/*
		 * The smp_rmb() here is to make sure that the spinner
		 * state is consulted before reading the wait_lock.
		 */
		smp_rmb();
		if (!raw_spin_trylock_irqsave(&sem->wait_lock, flags))
			return sem;
		goto locked;
	}
	raw_spin_lock_irqsave(&sem->wait_lock, flags);
locked:

	if (!list_empty(&sem->wait_list))
		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}
EXPORT_SYMBOL(rwsem_wake);

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
__visible
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (!list_empty(&sem->wait_list))
		__rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}
EXPORT_SYMBOL(rwsem_downgrade_wake);