/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 *
 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
 * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/osq_lock.h>

#include "rwsem.h"

/*
 * Guide to the rw_semaphore's count field for common values.
 * (32-bit case illustrated, similar for 64-bit)
 *
 * 0x0000000X	(1) X readers active or attempting lock, no writer waiting
 *		    X = #active_readers + #readers attempting to lock
 *		    (X*ACTIVE_BIAS)
 *
 * 0x00000000	rwsem is unlocked, and no one is waiting for the lock or
 *		attempting to read lock or write lock.
 *
 * 0xffff000X	(1) X readers active or attempting lock, with waiters for lock
 *		    X = #active readers + # readers attempting lock
 *		    (X*ACTIVE_BIAS + WAITING_BIAS)
 *		(2) 1 writer attempting lock, no waiters for lock
 *		    X-1 = #active readers + #readers attempting lock
 *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *		(3) 1 writer active, no waiters for lock
 *		    X-1 = #active readers + #readers attempting lock
 *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *
 * 0xffff0001	(1) 1 reader active or attempting lock, waiters for lock
 *		    (WAITING_BIAS + ACTIVE_BIAS)
 *		(2) 1 writer active or attempting lock, no waiters for lock
 *		    (ACTIVE_WRITE_BIAS)
 *
 * 0xffff0000	(1) There are writers or readers queued but none active
 *		    or in the process of attempting lock.
 *		    (WAITING_BIAS)
 *		Note: writer can attempt to steal lock for this count by adding
 *		ACTIVE_WRITE_BIAS in cmpxchg and checking the old count
 *
 * 0xfffe0001	(1) 1 writer active, or attempting lock. Waiters on queue.
 *		    (ACTIVE_WRITE_BIAS + WAITING_BIAS)
 *
 * Note: Readers attempt to lock by adding ACTIVE_BIAS in down_read and
 *	 checking that the count has become greater than 0, which indicates
 *	 a successful lock acquisition, i.e. the case where there are only
 *	 readers or nobody holds the lock (1st and 2nd cases above).
 *
 *	 Writers attempt to lock by adding ACTIVE_WRITE_BIAS in down_write and
 *	 checking that the count has become ACTIVE_WRITE_BIAS, which indicates
 *	 a successful lock acquisition (i.e. nobody else holds the lock or is
 *	 attempting to lock it).  If unsuccessful, rwsem_down_write_failed()
 *	 checks whether there are only waiters and none active (5th case
 *	 above) and attempts to steal the lock.
 *
 */
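
/*
 * A worked example of the count arithmetic above, assuming the usual
 * 32-bit bias values (ACTIVE_BIAS == 0x00000001, WAITING_BIAS == 0xffff0000
 * and ACTIVE_WRITE_BIAS == WAITING_BIAS + ACTIVE_BIAS == 0xffff0001):
 *
 *	two readers active, nobody queued:
 *		2 * ACTIVE_BIAS			 == 0x00000002
 *	two readers active, waiter(s) queued:
 *		WAITING_BIAS + 2 * ACTIVE_BIAS	 == 0xffff0002
 *	one writer active, waiter(s) queued:
 *		WAITING_BIAS + ACTIVE_WRITE_BIAS == 0xfffe0001
 */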

/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->count = RWSEM_UNLOCKED_VALUE;
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	sem->owner = NULL;
	osq_lock_init(&sem->osq);
#endif
}

EXPORT_SYMBOL(__init_rwsem);

enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	enum rwsem_waiter_type type;
};

enum rwsem_wake_type {
	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
	RWSEM_WAKE_READERS,	/* Wake readers only */
	RWSEM_WAKE_READ_OWNED	/* Waker thread holds the read lock */
};

/*
 * handle the lock release when processes blocked on it that can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - there must be someone on the queue
 * - the wait_lock must be held by the caller
 * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
 *   to actually wakeup the blocked task(s) and drop the reference count,
 *   preferably when the wait_lock is released
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only marked woken if downgrading is false
 */
static struct rw_semaphore *
__rwsem_mark_wake(struct rw_semaphore *sem,
		  enum rwsem_wake_type wake_type, struct wake_q_head *wake_q)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	struct list_head *next;
	long oldcount, woken, loop, adjustment;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
		if (wake_type == RWSEM_WAKE_ANY) {
			/*
			 * Mark writer at the front of the queue for wakeup.
			 * Until the task is actually awoken later by
			 * the caller, other writers are able to steal it.
			 * Readers, on the other hand, will block as they
			 * will notice the queued writer.
			 */
			wake_q_add(wake_q, waiter->task);
		}
		goto out;
	}

	/* Writers might steal the lock before we grant it to the next reader.
	 * We prefer to do the first reader grant before counting readers
	 * so we can bail out early if a writer stole the lock.
	 */
	adjustment = 0;
	if (wake_type != RWSEM_WAKE_READ_OWNED) {
		adjustment = RWSEM_ACTIVE_READ_BIAS;
 try_reader_grant:
		oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
		if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
			/* A writer stole the lock. Undo our reader grant. */
			if (rwsem_atomic_update(-adjustment, sem) &
						RWSEM_ACTIVE_MASK)
				goto out;
			/* Last active locker left. Retry waking readers. */
			goto try_reader_grant;
		}
	}

	/* Grant an infinite number of read locks to the readers at the front
	 * of the queue.  Note we increment the 'active part' of the count by
	 * the number of readers before waking any processes up.
	 */
	woken = 0;
	do {
		woken++;

		if (waiter->list.next == &sem->wait_list)
			break;

		waiter = list_entry(waiter->list.next,
					struct rwsem_waiter, list);

	} while (waiter->type != RWSEM_WAITING_FOR_WRITE);

	adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
	if (waiter->type != RWSEM_WAITING_FOR_WRITE)
		/* hit end of list above */
		adjustment -= RWSEM_WAITING_BIAS;

	if (adjustment)
		rwsem_atomic_add(adjustment, sem);

	next = sem->wait_list.next;
	loop = woken;
	do {
		waiter = list_entry(next, struct rwsem_waiter, list);
		next = waiter->list.next;
		tsk = waiter->task;

		wake_q_add(wake_q, tsk);
		/*
		 * Ensure that the last operation is setting the reader
		 * waiter to nil such that rwsem_down_read_failed() cannot
		 * race with do_exit() by always holding a reference count
		 * to the task to wakeup.
		 */
		smp_store_release(&waiter->task, NULL);
	} while (--loop);

	sem->wait_list.next = next;
	next->prev = &sem->wait_list;

 out:
	return sem;
}

/*
 * Wait for the read lock to be granted
 */
__visible
struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
{
	long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
	struct rwsem_waiter waiter;
	struct task_struct *tsk = current;
	WAKE_Q(wake_q);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.type = RWSEM_WAITING_FOR_READ;

	raw_spin_lock_irq(&sem->wait_lock);
	if (list_empty(&sem->wait_list))
		adjustment += RWSEM_WAITING_BIAS;
	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	count = rwsem_atomic_update(adjustment, sem);

	/* If there are no active locks, wake the front queued process(es).
	 *
	 * If there are no writers and we are first in the queue,
	 * wake our own waiter to join the existing active readers !
	 */
	if (count == RWSEM_WAITING_BIAS ||
	    (count > RWSEM_WAITING_BIAS &&
	     adjustment != -RWSEM_ACTIVE_READ_BIAS))
		sem = __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

	raw_spin_unlock_irq(&sem->wait_lock);
	wake_up_q(&wake_q);

	/* wait to be given the lock */
	while (true) {
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if (!waiter.task)
			break;
		schedule();
	}

	__set_task_state(tsk, TASK_RUNNING);
	return sem;
}
EXPORT_SYMBOL(rwsem_down_read_failed);

/*
 * This function must be called with the sem->wait_lock held to prevent
 * race conditions between checking the rwsem wait list and setting the
 * sem->count accordingly.
 */
static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
{
	/*
	 * Avoid trying to acquire write lock if count isn't RWSEM_WAITING_BIAS.
	 */
	if (count != RWSEM_WAITING_BIAS)
		return false;

	/*
	 * Acquire the lock by trying to set it to ACTIVE_WRITE_BIAS. If there
	 * are other tasks on the wait list, we need to add on WAITING_BIAS.
	 */
	count = list_is_singular(&sem->wait_list) ?
			RWSEM_ACTIVE_WRITE_BIAS :
			RWSEM_ACTIVE_WRITE_BIAS + RWSEM_WAITING_BIAS;
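
	/*
	 * For illustration, assuming the usual 32-bit bias values: the
	 * cmpxchg below attempts 0xffff0000 -> 0xffff0001 when we are the
	 * only waiter, and 0xffff0000 -> 0xfffe0001 when other waiters
	 * remain queued behind us.
	 */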

	if (cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS, count) == RWSEM_WAITING_BIAS) {
		rwsem_set_owner(sem);
		return true;
	}

	return false;
}

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
 * Try to acquire write lock before the writer has been put on wait queue.
 */
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
	long old, count = READ_ONCE(sem->count);

	while (true) {
		if (!(count == 0 || count == RWSEM_WAITING_BIAS))
			return false;

		old = cmpxchg_acquire(&sem->count, count,
				      count + RWSEM_ACTIVE_WRITE_BIAS);
		if (old == count) {
			rwsem_set_owner(sem);
			return true;
		}

		count = old;
	}
}

static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *owner;
	bool ret = true;

	if (need_resched())
		return false;

	rcu_read_lock();
	owner = READ_ONCE(sem->owner);
	if (!owner) {
		long count = READ_ONCE(sem->count);
		/*
		 * If sem->owner is not set, yet we have just recently entered the
		 * slowpath with the lock being active, then there is a possibility
		 * reader(s) may have the lock. To be safe, bail spinning in these
		 * situations.
		 */
		if (count & RWSEM_ACTIVE_MASK)
			ret = false;
		goto done;
	}

	ret = owner->on_cpu;
done:
	rcu_read_unlock();
	return ret;
}

static noinline
bool rwsem_spin_on_owner(struct rw_semaphore *sem, struct task_struct *owner)
{
	long count;

	rcu_read_lock();
	while (sem->owner == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking that sem->owner still matches owner.  If that
		 * fails, owner might point to free()d memory; if it still
		 * matches, the rcu_read_lock() ensures the memory stays
		 * valid.
		 */
		barrier();

		/* abort spinning when need_resched or owner is not running */
		if (!owner->on_cpu || need_resched()) {
			rcu_read_unlock();
			return false;
		}

		cpu_relax_lowlatency();
	}
	rcu_read_unlock();

	if (READ_ONCE(sem->owner))
		return true; /* new owner, continue spinning */

	/*
	 * When the owner is not set, the lock could be free or
	 * held by readers. Check the counter to verify the
	 * state.
	 */
	count = READ_ONCE(sem->count);
	return (count == 0 || count == RWSEM_WAITING_BIAS);
}

static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	struct task_struct *owner;
	bool taken = false;

	preempt_disable();

	/* sem->wait_lock should not be held when doing optimistic spinning */
	if (!rwsem_can_spin_on_owner(sem))
		goto done;

	if (!osq_lock(&sem->osq))
		goto done;

	while (true) {
		owner = READ_ONCE(sem->owner);
		if (owner && !rwsem_spin_on_owner(sem, owner))
			break;

		/* wait_lock will be acquired if write_lock is obtained */
		if (rwsem_try_write_lock_unqueued(sem)) {
			taken = true;
			break;
		}

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task, that will live-lock because we won't
		 * let the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(current)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax_lowlatency();
	}
	osq_unlock(&sem->osq);
done:
	preempt_enable();
	return taken;
}

/*
 * Return true if the rwsem has active spinner
 */
static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
	return osq_is_locked(&sem->osq);
}

#else
static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	return false;
}

static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
	return false;
}
#endif

/*
 * Wait until we successfully acquire the write lock
 */
static inline struct rw_semaphore *
__rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
{
	long count;
	bool waiting = true; /* any queued threads before us */
	struct rwsem_waiter waiter;
	struct rw_semaphore *ret = sem;
	WAKE_Q(wake_q);

	/* undo write bias from down_write operation, stop active locking */
	count = rwsem_atomic_update(-RWSEM_ACTIVE_WRITE_BIAS, sem);

	/* do optimistic spinning and steal lock if possible */
	if (rwsem_optimistic_spin(sem))
		return sem;

	/*
	 * Optimistic spinning failed, proceed to the slowpath
	 * and block until we can acquire the sem.
	 */
	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_WRITE;

	raw_spin_lock_irq(&sem->wait_lock);

	/* account for this before adding a new element to the list */
	if (list_empty(&sem->wait_list))
		waiting = false;

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	if (waiting) {
		count = READ_ONCE(sem->count);

		/*
		 * If there were already threads queued before us and there are
		 * no active writers, the lock must be read owned; so we try to
		 * wake any read locks that were queued ahead of us.
		 */
		if (count > RWSEM_WAITING_BIAS) {
			WAKE_Q(wake_q);

			sem = __rwsem_mark_wake(sem, RWSEM_WAKE_READERS, &wake_q);
			/*
			 * The wakeup is normally called _after_ the wait_lock
			 * is released, but given that we are proactively waking
			 * readers we can deal with the wake_q overhead as it is
			 * similar to releasing and taking the wait_lock again
			 * for attempting rwsem_try_write_lock().
			 */
			wake_up_q(&wake_q);
		}

	} else
		count = rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);

	/* wait until we successfully acquire the lock */
	set_current_state(state);
	while (true) {
		if (rwsem_try_write_lock(count, sem))
			break;
		raw_spin_unlock_irq(&sem->wait_lock);

		/* Block until there are no active lockers. */
		do {
			if (signal_pending_state(state, current))
				goto out_nolock;

			schedule();
			set_current_state(state);
		} while ((count = sem->count) & RWSEM_ACTIVE_MASK);

		raw_spin_lock_irq(&sem->wait_lock);
	}
	__set_current_state(TASK_RUNNING);
	list_del(&waiter.list);
	raw_spin_unlock_irq(&sem->wait_lock);

	return ret;

out_nolock:
	__set_current_state(TASK_RUNNING);
	raw_spin_lock_irq(&sem->wait_lock);
	list_del(&waiter.list);
	if (list_empty(&sem->wait_list))
		rwsem_atomic_update(-RWSEM_WAITING_BIAS, sem);
	else
		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
	raw_spin_unlock_irq(&sem->wait_lock);
	wake_up_q(&wake_q);

	return ERR_PTR(-EINTR);
}

__visible struct rw_semaphore * __sched
rwsem_down_write_failed(struct rw_semaphore *sem)
{
	return __rwsem_down_write_failed_common(sem, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(rwsem_down_write_failed);

__visible struct rw_semaphore * __sched
rwsem_down_write_failed_killable(struct rw_semaphore *sem)
{
	return __rwsem_down_write_failed_common(sem, TASK_KILLABLE);
}
EXPORT_SYMBOL(rwsem_down_write_failed_killable);

/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
__visible
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
	unsigned long flags;
	WAKE_Q(wake_q);

	/*
	 * If a spinner is present, it is not necessary to do the wakeup.
	 * Try to do wakeup only if the trylock succeeds to minimize
	 * spinlock contention which may introduce too much delay in the
	 * unlock operation.
	 *
	 *    spinning writer		up_write/up_read caller
	 *    ---------------		-----------------------
	 * [S]   osq_unlock()		[L]   osq
	 *	 MB			      RMB
	 * [RmW] rwsem_try_write_lock() [RmW] spin_trylock(wait_lock)
	 *
	 * Here, it is important to make sure that there won't be a missed
	 * wakeup while the rwsem is free and the only spinning writer goes
	 * to sleep without taking the rwsem. Even when the spinning writer
	 * is just going to break out of the waiting loop, it will still do
	 * a trylock in rwsem_down_write_failed() before sleeping. IOW, if
	 * rwsem_has_spinner() is true, it will guarantee at least one
	 * trylock attempt on the rwsem later on.
	 */
	if (rwsem_has_spinner(sem)) {
		/*
		 * The smp_rmb() here is to make sure that the spinner
		 * state is consulted before reading the wait_lock.
		 */
		smp_rmb();
		if (!raw_spin_trylock_irqsave(&sem->wait_lock, flags))
			return sem;
		goto locked;
	}
	raw_spin_lock_irqsave(&sem->wait_lock, flags);
locked:

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}
EXPORT_SYMBOL(rwsem_wake);

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
__visible
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
	unsigned long flags;
	WAKE_Q(wake_q);

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}
EXPORT_SYMBOL(rwsem_downgrade_wake);