Commit d1331661 authored by Will Deacon, committed by Ingo Molnar

locking/qrwlock: Prevent slowpath writers getting held up by fastpath

When a prospective writer takes the qrwlock locking slowpath due to the
lock being held, it attempts to cmpxchg the wmode field from 0 to
_QW_WAITING so that concurrent lockers also take the slowpath and queue
on the spinlock accordingly, allowing the lockers to drain.
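
For context, the write-lock fastpath (queued_write_lock(), in the same qrwlock header touched by this patch) acquires the lock with a single cmpxchg on the whole lockword; roughly, as a simplified sketch of the code at the time rather than part of this diff:

  static inline void queued_write_lock(struct qrwlock *lock)
  {
  	/* Fastpath: succeed only if the entire lockword is zero */
  	if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0)
  		return;

  	/* Lock is held or contended: take the queued slowpath */
  	queued_write_lock_slowpath(lock);
  }

Because this fastpath only needs to observe cnts == 0, it races with the queued writer described below.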

Unfortunately, this isn't fair, because a fastpath writer that comes in
after the lock is made available but before the _QW_WAITING flag is set
can effectively jump the queue. If there is a steady stream of prospective
writers, then the waiter will be held off indefinitely.

This patch restores fairness by separating _QW_WAITING and _QW_LOCKED
into two distinct fields: _QW_LOCKED continues to occupy the bottom byte
of the lockword so that it can be cleared unconditionally when unlocking,
but _QW_WAITING now occupies what used to be the bottom bit of the reader
count. This then forces the slow-path for concurrent lockers.
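
Concretely, the lockword now has the locked byte at the bottom, the waiting bit directly above it, and the reader count above that. A small user-space sketch of the new layout (the four defines mirror the new values in the hunks below; the decode() helper and main() driver are illustrative only):

  #include <stdio.h>

  /*
   * New lockword layout introduced by this patch:
   *   bits  7:0  _QW_LOCKED   - a writer holds the lock
   *   bit   8    _QW_WAITING  - a writer is queued and waiting
   *   bits 31:9  reader count
   */
  #define _QW_WAITING  0x100
  #define _QW_LOCKED   0x0ff
  #define _QW_WMASK    0x1ff
  #define _QR_SHIFT    9

  /* Illustrative helper: print how a given lockword value decodes */
  static void decode(unsigned int cnts)
  {
  	printf("cnts=%#05x  readers=%u  waiting=%d  write-locked=%d\n",
  	       cnts, cnts >> _QR_SHIFT,
  	       !!(cnts & _QW_WAITING), !!(cnts & _QW_LOCKED));
  }

  int main(void)
  {
  	decode(0);                               /* lock is free                   */
  	decode(_QW_LOCKED);                      /* held for write                 */
  	decode(_QW_WAITING);                     /* writer queued, lock free       */
  	decode((2u << _QR_SHIFT) | _QW_WAITING); /* two readers, one queued writer */
  	return 0;
  }

Note that _QW_LOCKED is still a whole byte, so queued_write_unlock() can keep clearing it unconditionally with a byte store, while _QW_WAITING no longer shares that byte and can be set with a plain atomic_add().
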
Tested-by: Waiman Long <longman@redhat.com>
Tested-by: Jeremy Linton <jeremy.linton@arm.com>
Tested-by: Adam Wallis <awallis@codeaurora.org>
Tested-by: Jan Glauber <jglauber@cavium.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Jeremy.Linton@arm.com
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arm-kernel@lists.infradead.org
Link: http://lkml.kernel.org/r/1507810851-306-6-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent 087133ac
@@ -26,24 +26,11 @@
 
 /*
  * Writer states & reader shift and bias.
- *
- *       | +0 | +1 | +2 | +3 |
- *   ----+----+----+----+----+
- *    LE | 78 | 56 | 34 | 12 | 0x12345678
- *   ----+----+----+----+----+
- *       | wr |      rd      |
- *       +----+----+----+----+
- *
- *   ----+----+----+----+----+
- *    BE | 12 | 34 | 56 | 78 | 0x12345678
- *   ----+----+----+----+----+
- *       |      rd      | wr |
- *       +----+----+----+----+
  */
-#define _QW_WAITING  1      /* A writer is waiting     */
-#define _QW_LOCKED   0xff   /* A writer holds the lock */
-#define _QW_WMASK    0xff   /* Writer mask             */
-#define _QR_SHIFT    8      /* Reader count shift      */
+#define _QW_WAITING  0x100  /* A writer is waiting     */
+#define _QW_LOCKED   0x0ff  /* A writer holds the lock */
+#define _QW_WMASK    0x1ff  /* Writer mask             */
+#define _QR_SHIFT    9      /* Reader count shift      */
 #define _QR_BIAS     (1U << _QR_SHIFT)
 
 /*
@@ -134,7 +121,7 @@ static inline void queued_read_unlock(struct qrwlock *lock)
  */
 static inline void queued_write_unlock(struct qrwlock *lock)
 {
-	smp_store_release(&lock->wmode, 0);
+	smp_store_release(&lock->wlocked, 0);
 }
 
 /*
...
@@ -13,11 +13,11 @@ typedef struct qrwlock {
 	atomic_t	cnts;
 	struct {
 #ifdef __LITTLE_ENDIAN
-		u8	wmode;		/* Writer mode   */
-		u8	rcnts[3];	/* Reader counts */
+		u8	wlocked;	/* Locked for write? */
+		u8	__lstate[3];
 #else
-		u8	rcnts[3];	/* Reader counts */
-		u8	wmode;		/* Writer mode   */
+		u8	__lstate[3];
+		u8	wlocked;	/* Locked for write? */
 #endif
 	};
 };
...
@@ -39,8 +39,7 @@ void queued_read_lock_slowpath(struct qrwlock *lock)
 		 * so spin with ACQUIRE semantics until the lock is available
 		 * without waiting in the queue.
 		 */
-		atomic_cond_read_acquire(&lock->cnts, (VAL & _QW_WMASK)
-					 != _QW_LOCKED);
+		atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
 		return;
 	}
 	atomic_sub(_QR_BIAS, &lock->cnts);
@@ -56,7 +55,7 @@ void queued_read_lock_slowpath(struct qrwlock *lock)
 	 * that accesses can't leak upwards out of our subsequent critical
 	 * section in the case that the lock is currently held for write.
 	 */
-	atomic_cond_read_acquire(&lock->cnts, (VAL & _QW_WMASK) != _QW_LOCKED);
+	atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
 
 	/*
 	 * Signal the next one in queue to become queue head
@@ -79,19 +78,10 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
 	    (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0))
 		goto unlock;
 
-	/*
-	 * Set the waiting flag to notify readers that a writer is pending,
-	 * or wait for a previous writer to go away.
-	 */
-	for (;;) {
-		if (!READ_ONCE(lock->wmode) &&
-		    (cmpxchg_relaxed(&lock->wmode, 0, _QW_WAITING) == 0))
-			break;
-
-		cpu_relax();
-	}
+	/* Set the waiting flag to notify readers that a writer is pending */
+	atomic_add(_QW_WAITING, &lock->cnts);
 
-	/* When no more readers, set the locked flag */
+	/* When no more readers or writers, set the locked flag */
 	do {
 		atomic_cond_read_acquire(&lock->cnts, VAL == _QW_WAITING);
 	} while (atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING,
...
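
Putting the hunks together, the post-patch writer slowpath reads roughly as follows. This is a sketch assembled from the changes above; the arch_spin_lock()/arch_spin_unlock() on lock->wait_lock and the unlock label are the unchanged surrounding context of queued_write_lock_slowpath(), reproduced here as an assumption rather than taken from the hunks shown:

  void queued_write_lock_slowpath(struct qrwlock *lock)
  {
  	/* Put the writer into the wait queue (assumed surrounding context) */
  	arch_spin_lock(&lock->wait_lock);

  	/* Try to acquire the lock directly if no reader is present */
  	if (!atomic_read(&lock->cnts) &&
  	    (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0))
  		goto unlock;

  	/* Set the waiting flag to notify readers that a writer is pending */
  	atomic_add(_QW_WAITING, &lock->cnts);

  	/* When no more readers or writers, set the locked flag */
  	do {
  		atomic_cond_read_acquire(&lock->cnts, VAL == _QW_WAITING);
  	} while (atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING,
  					_QW_LOCKED) != _QW_WAITING);
  unlock:
  	arch_spin_unlock(&lock->wait_lock);
  }

The single atomic_add() replaces the old cmpxchg loop on wmode: because _QW_WAITING now sits above the locked byte, the queued writer can announce itself even while the lock is still held, and once the bit is set a fastpath writer can no longer observe an all-zero lockword and jump the queue.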