/* rwsem-xadd.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 *
 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
 * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched/rt.h>

#include "mcs_spinlock.h"

/*
 * Guide to the rw_semaphore's count field for common values.
 * (32-bit case illustrated, similar for 64-bit)
 *
 * 0x0000000X	(1) X readers active or attempting lock, no writer waiting
 *		    X = #active_readers + #readers attempting to lock
 *		    (X*ACTIVE_BIAS)
 *
 * 0x00000000	rwsem is unlocked, and no one is waiting for the lock or
 *		attempting to read lock or write lock.
 *
 * 0xffff000X	(1) X readers active or attempting lock, with waiters for lock
 *		    X = #active readers + # readers attempting lock
 *		    (X*ACTIVE_BIAS + WAITING_BIAS)
 *		(2) 1 writer attempting lock, no waiters for lock
 *		    X-1 = #active readers + #readers attempting lock
 *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *		(3) 1 writer active, no waiters for lock
 *		    X-1 = #active readers + #readers attempting lock
 *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *
 * 0xffff0001	(1) 1 reader active or attempting lock, waiters for lock
 *		    (WAITING_BIAS + ACTIVE_BIAS)
 *		(2) 1 writer active or attempting lock, no waiters for lock
 *		    (ACTIVE_WRITE_BIAS)
 *
 * 0xffff0000	(1) There are writers or readers queued but none active
 *		    or in the process of attempting lock.
 *		    (WAITING_BIAS)
 *		Note: writer can attempt to steal lock for this count by adding
 *		ACTIVE_WRITE_BIAS in cmpxchg and checking the old count
 *
 * 0xfffe0001	(1) 1 writer active, or attempting lock. Waiters on queue.
 *		    (ACTIVE_WRITE_BIAS + WAITING_BIAS)
 *
 * Note: Readers attempt to lock by adding ACTIVE_BIAS in down_read and checking
 *	 the count becomes more than 0 for successful lock acquisition,
 *	 i.e. the case where there are only readers or nobody has lock.
 *	 (1st and 2nd case above).
 *
 *	 Writers attempt to lock by adding ACTIVE_WRITE_BIAS in down_write and
 *	 checking the count becomes ACTIVE_WRITE_BIAS for successful lock
 *	 acquisition (i.e. nobody else has lock or attempts lock).  If
 *	 unsuccessful, in rwsem_down_write_failed, we'll check to see if there
 *	 are only waiters but none active (5th case above), and attempt to
 *	 steal the lock.
 *
 */
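
/*
 * Example with the 32-bit constants from asm-generic/rwsem.h (shown here
 * purely to illustrate the arithmetic described above):
 *
 *	RWSEM_ACTIVE_BIAS	= 0x00000001
 *	RWSEM_ACTIVE_MASK	= 0x0000ffff
 *	RWSEM_WAITING_BIAS	= 0xffff0000 (== -0x00010000)
 *	RWSEM_ACTIVE_READ_BIAS	= RWSEM_ACTIVE_BIAS
 *	RWSEM_ACTIVE_WRITE_BIAS	= RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS
 *
 * Two active readers with other tasks queued behind them thus give
 * 0xffff0000 + 2*0x00000001 = 0xffff0002, i.e. the 0xffff000X case.
 */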

/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->count = RWSEM_UNLOCKED_VALUE;
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	sem->owner = NULL;
	osq_lock_init(&sem->osq);
#endif
}

EXPORT_SYMBOL(__init_rwsem);
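
/*
 * Note: most code does not call __init_rwsem() directly.  The init_rwsem()
 * wrapper in <linux/rwsem.h> supplies the semaphore's name and a static
 * lock_class_key for lockdep, and DECLARE_RWSEM() covers the statically
 * initialized case.
 */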

enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	enum rwsem_waiter_type type;
};
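
/*
 * Each waiter is an on-stack structure belonging to the sleeping task and
 * is only valid while that task is blocked in the slowpath; a waker must
 * therefore not touch it once it has cleared ->task (see __rwsem_do_wake()).
 */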

enum rwsem_wake_type {
	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
	RWSEM_WAKE_READERS,	/* Wake readers only */
	RWSEM_WAKE_READ_OWNED	/* Waker thread holds the read lock */
};

/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - there must be someone on the queue
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if downgrading is false
 */
static struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	struct list_head *next;
	long oldcount, woken, loop, adjustment;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
		if (wake_type == RWSEM_WAKE_ANY)
			/* Wake writer at the front of the queue, but do not
			 * grant it the lock yet as we want other writers
			 * to be able to steal it.  Readers, on the other hand,
			 * will block as they will notice the queued writer.
			 */
			wake_up_process(waiter->task);
		goto out;
	}

	/* Writers might steal the lock before we grant it to the next reader.
	 * We prefer to do the first reader grant before counting readers
	 * so we can bail out early if a writer stole the lock.
	 */
	adjustment = 0;
	if (wake_type != RWSEM_WAKE_READ_OWNED) {
		adjustment = RWSEM_ACTIVE_READ_BIAS;
 try_reader_grant:
		oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
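		/*
		 * If the count was below RWSEM_WAITING_BIAS before our
		 * update, a writer has already added
		 * RWSEM_ACTIVE_WRITE_BIAS, i.e. it stole the lock ahead of
		 * the reader(s) we are about to wake.
		 */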
		if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
			/* A writer stole the lock. Undo our reader grant. */
			if (rwsem_atomic_update(-adjustment, sem) &
						RWSEM_ACTIVE_MASK)
				goto out;
			/* Last active locker left. Retry waking readers. */
			goto try_reader_grant;
		}
	}

	/* Grant an infinite number of read locks to the readers at the front
	 * of the queue.  Note we increment the 'active part' of the count by
	 * the number of readers before waking any processes up.
	 */
	woken = 0;
	do {
		woken++;

		if (waiter->list.next == &sem->wait_list)
			break;

		waiter = list_entry(waiter->list.next,
					struct rwsem_waiter, list);

	} while (waiter->type != RWSEM_WAITING_FOR_WRITE);

	adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
	if (waiter->type != RWSEM_WAITING_FOR_WRITE)
		/* hit end of list above */
		adjustment -= RWSEM_WAITING_BIAS;

	if (adjustment)
		rwsem_atomic_add(adjustment, sem);

	next = sem->wait_list.next;
	loop = woken;
	do {
		waiter = list_entry(next, struct rwsem_waiter, list);
		next = waiter->list.next;
		tsk = waiter->task;
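		/*
		 * Once waiter->task is cleared, the woken task may return
		 * from rwsem_down_read_failed() and free its on-stack
		 * rwsem_waiter, so make sure all our reads of the waiter
		 * are ordered before the NULL store below.
		 */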
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
	} while (--loop);

	sem->wait_list.next = next;
	next->prev = &sem->wait_list;

 out:
	return sem;
}

/*
 * Wait for the read lock to be granted
 */
__visible
struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
{
	long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
	struct rwsem_waiter waiter;
	struct task_struct *tsk = current;

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.type = RWSEM_WAITING_FOR_READ;
	get_task_struct(tsk);

	raw_spin_lock_irq(&sem->wait_lock);
	if (list_empty(&sem->wait_list))
		adjustment += RWSEM_WAITING_BIAS;
	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	count = rwsem_atomic_update(adjustment, sem);

	/* If there are no active locks, wake the front queued process(es).
	 *
	 * If there are no writers and we are first in the queue,
	 * wake our own waiter to join the existing active readers !
	 */
	if (count == RWSEM_WAITING_BIAS ||
	    (count > RWSEM_WAITING_BIAS &&
	     adjustment != -RWSEM_ACTIVE_READ_BIAS))
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);

	raw_spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	while (true) {
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if (!waiter.task)
			break;
		schedule();
	}

	tsk->state = TASK_RUNNING;

	return sem;
}
EXPORT_SYMBOL(rwsem_down_read_failed);
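
/*
 * rwsem_try_write_lock(): try to get the write lock while holding
 * sem->wait_lock.  It only succeeds when nobody holds the lock
 * (count == RWSEM_WAITING_BIAS, i.e. queued waiters only) and the
 * cmpxchg() swaps in RWSEM_ACTIVE_WRITE_BIAS; if other waiters remain
 * queued behind us, RWSEM_WAITING_BIAS is added back for them.
 */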
static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
{
	if (!(count & RWSEM_ACTIVE_MASK)) {
		/* try acquiring the write lock */
		if (sem->count == RWSEM_WAITING_BIAS &&
		    cmpxchg(&sem->count, RWSEM_WAITING_BIAS,
			    RWSEM_ACTIVE_WRITE_BIAS) == RWSEM_WAITING_BIAS) {
			if (!list_is_singular(&sem->wait_list))
				rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);
			return true;
		}
	}
	return false;
}

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
 * Try to acquire write lock before the writer has been put on wait queue.
 */
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
	long old, count = ACCESS_ONCE(sem->count);

	while (true) {
		if (!(count == 0 || count == RWSEM_WAITING_BIAS))
			return false;

		old = cmpxchg(&sem->count, count, count + RWSEM_ACTIVE_WRITE_BIAS);
		if (old == count)
			return true;

		count = old;
	}
}

static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *owner;
	bool on_cpu = false;

	if (need_resched())
		return false;

	rcu_read_lock();
	owner = ACCESS_ONCE(sem->owner);
	if (owner)
		on_cpu = owner->on_cpu;
	rcu_read_unlock();

	/*
	 * If sem->owner is not set, yet we have just recently entered the
	 * slowpath, then there is a possibility reader(s) may have the lock.
	 * To be safe, avoid spinning in these situations.
	 */
	return on_cpu;
}

static inline bool owner_running(struct rw_semaphore *sem,
				 struct task_struct *owner)
{
	if (sem->owner != owner)
		return false;

	/*
	 * Ensure we emit the owner->on_cpu dereference _after_ checking
	 * that sem->owner still matches owner.  If that check fails, owner
	 * might point to free()d memory; if it still matches, the
	 * rcu_read_lock() ensures the memory stays valid.
	 */
	barrier();

	return owner->on_cpu;
}

static noinline
bool rwsem_spin_on_owner(struct rw_semaphore *sem, struct task_struct *owner)
{
	rcu_read_lock();
	while (owner_running(sem, owner)) {
		if (need_resched())
			break;

		cpu_relax_lowlatency();
	}
	rcu_read_unlock();

	/*
	 * We break out of the loop above on need_resched() or when the
	 * owner changed, which is a sign of heavy contention. Return
	 * success only when sem->owner is NULL.
	 */
	return sem->owner == NULL;
}

static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	struct task_struct *owner;
	bool taken = false;

	preempt_disable();

	/* sem->wait_lock should not be held when doing optimistic spinning */
	if (!rwsem_can_spin_on_owner(sem))
		goto done;

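	/*
	 * The osq (optimistic spin queue, MCS-like) lock below ensures that
	 * only one task spins on this rwsem's owner field at a time;
	 * everyone else spins on its own queue node instead of contending
	 * on the sem cacheline.
	 */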
	if (!osq_lock(&sem->osq))
		goto done;

	while (true) {
		owner = ACCESS_ONCE(sem->owner);
		if (owner && !rwsem_spin_on_owner(sem, owner))
			break;

		/* wait_lock will be acquired if write_lock is obtained */
		if (rwsem_try_write_lock_unqueued(sem)) {
			taken = true;
			break;
		}

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task, we can live-lock because we won't let
		 * the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(current)))
			break;

		/*
		 * The cpu_relax_lowlatency() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax_lowlatency();
	}
	osq_unlock(&sem->osq);
done:
	preempt_enable();
	return taken;
}

#else
static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	return false;
}
#endif

/*
 * Wait until we successfully acquire the write lock
 */
__visible
struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
{
	long count;
	bool waiting = true; /* any queued threads before us */
	struct rwsem_waiter waiter;

	/* undo write bias from down_write operation, stop active locking */
	count = rwsem_atomic_update(-RWSEM_ACTIVE_WRITE_BIAS, sem);

	/* do optimistic spinning and steal lock if possible */
	if (rwsem_optimistic_spin(sem))
		return sem;

	/*
	 * Optimistic spinning failed, proceed to the slowpath
	 * and block until we can acquire the sem.
	 */
	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_WRITE;

	raw_spin_lock_irq(&sem->wait_lock);

	/* account for this before adding a new element to the list */
	if (list_empty(&sem->wait_list))
		waiting = false;

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	if (waiting) {
		count = ACCESS_ONCE(sem->count);

		/*
		 * If there were already threads queued before us and there are
		 * no active writers, the lock must be read owned; so we try to
		 * wake any read locks that were queued ahead of us.
		 */
		if (count > RWSEM_WAITING_BIAS)
			sem = __rwsem_do_wake(sem, RWSEM_WAKE_READERS);

	} else
		count = rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);

	/* wait until we successfully acquire the lock */
	set_current_state(TASK_UNINTERRUPTIBLE);
	while (true) {
		if (rwsem_try_write_lock(count, sem))
			break;
		raw_spin_unlock_irq(&sem->wait_lock);

		/* Block until there are no active lockers. */
		do {
			schedule();
			set_current_state(TASK_UNINTERRUPTIBLE);
		} while ((count = sem->count) & RWSEM_ACTIVE_MASK);

		raw_spin_lock_irq(&sem->wait_lock);
	}
	__set_current_state(TASK_RUNNING);

	list_del(&waiter.list);
	raw_spin_unlock_irq(&sem->wait_lock);

	return sem;
}
EXPORT_SYMBOL(rwsem_down_write_failed);

/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
__visible
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	return sem;
}
EXPORT_SYMBOL(rwsem_wake);

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
__visible
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	return sem;
}
EXPORT_SYMBOL(rwsem_downgrade_wake);