Commit 5e4823e6 authored by Linus Torvalds

Merge tag 'locking_urgent_for_v5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fix from Borislav Petkov:

 - Avoid rwsem lockups in certain situations when handling the handoff
   bit

* tag 'locking_urgent_for_v5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/rwsem: Allow slowpath writer to ignore handoff bit if not set by first waiter
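
The change below reworks how waiters honor the HANDOFF bit. As a standalone illustration of the new back-off rule in rwsem_try_write_lock() (a simplified user-space sketch; the helper must_yield() and the trimmed-down waiter struct are assumptions for illustration, not kernel code), the decision now looks roughly like this:

/*
 * Sketch of the new back-off decision: with the HANDOFF bit set in the
 * lock count, a waiter yields only when the first waiter in the queue
 * is the one that actually set the bit; previously every non-first
 * waiter backed off unconditionally.
 */
#include <stdbool.h>
#include <stdio.h>

struct waiter {
	bool handoff_set;	/* this waiter requested the handoff itself */
};

static bool must_yield(bool handoff_bit, const struct waiter *first,
		       const struct waiter *me)
{
	if (!handoff_bit)
		return false;	/* no handoff pending at all */
	return first->handoff_set && (me != first);
}

int main(void)
{
	struct waiter first = { .handoff_set = false };
	struct waiter other = { .handoff_set = false };

	/* Stale handoff: the bit is set, but the current first waiter did
	 * not set it, so another writer may still try for the lock. */
	printf("stale handoff -> yield? %d\n", must_yield(true, &first, &other));

	/* Fresh handoff: the first waiter set the bit, others must yield. */
	first.handoff_set = true;
	printf("fresh handoff -> yield? %d\n", must_yield(true, &first, &other));
	return 0;
}

In short, this matches the subject line: a slowpath writer may ignore a handoff bit that was not set by the first waiter, instead of stalling on it.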
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -335,8 +335,6 @@ struct rwsem_waiter {
 	struct task_struct *task;
 	enum rwsem_waiter_type type;
 	unsigned long timeout;
-
-	/* Writer only, not initialized in reader */
 	bool handoff_set;
 };
 
 #define rwsem_first_waiter(sem) \
@@ -459,10 +457,12 @@ static void rwsem_mark_wake(struct rw_semaphore *sem,
 		 * to give up the lock), request a HANDOFF to
 		 * force the issue.
 		 */
-		if (!(oldcount & RWSEM_FLAG_HANDOFF) &&
-		    time_after(jiffies, waiter->timeout)) {
-			adjustment -= RWSEM_FLAG_HANDOFF;
-			lockevent_inc(rwsem_rlock_handoff);
+		if (time_after(jiffies, waiter->timeout)) {
+			if (!(oldcount & RWSEM_FLAG_HANDOFF)) {
+				adjustment -= RWSEM_FLAG_HANDOFF;
+				lockevent_inc(rwsem_rlock_handoff);
+			}
+			waiter->handoff_set = true;
 		}
 
 		atomic_long_add(-adjustment, &sem->count);
@@ -599,7 +599,7 @@ rwsem_del_wake_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter,
 static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
 					struct rwsem_waiter *waiter)
 {
-	bool first = rwsem_first_waiter(sem) == waiter;
+	struct rwsem_waiter *first = rwsem_first_waiter(sem);
 	long count, new;
 
 	lockdep_assert_held(&sem->wait_lock);
@@ -609,11 +609,20 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
 		bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);
 
 		if (has_handoff) {
-			if (!first)
+			/*
+			 * Honor handoff bit and yield only when the first
+			 * waiter is the one that set it. Otherwise, we
+			 * still try to acquire the rwsem.
+			 */
+			if (first->handoff_set && (waiter != first))
 				return false;
 
-			/* First waiter inherits a previously set handoff bit */
-			waiter->handoff_set = true;
+			/*
+			 * First waiter can inherit a previously set handoff
+			 * bit and spin on rwsem if lock acquisition fails.
+			 */
+			if (waiter == first)
+				waiter->handoff_set = true;
 		}
 
 		new = count;
@@ -1027,6 +1036,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int stat
 	waiter.task = current;
 	waiter.type = RWSEM_WAITING_FOR_READ;
 	waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
+	waiter.handoff_set = false;
 
 	raw_spin_lock_irq(&sem->wait_lock);
 	if (list_empty(&sem->wait_list)) {
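
On the reader side, the rwsem_mark_wake() hunk above now marks the timed-out waiter as the handoff requester even when the HANDOFF bit was already set in the count, and the read slowpath therefore initializes handoff_set explicitly. A rough user-space model of that branch (handle_timeout(), the placeholder flag value, and the trimmed struct are assumptions for illustration, not the kernel implementation):

/*
 * Toy model of the restructured timeout branch: the waiter is flagged
 * whenever its timeout has expired, while the count adjustment stays
 * conditional on the bit not being set already (the lockevent counter
 * is omitted from this sketch).
 */
#include <stdbool.h>
#include <stdio.h>

#define RWSEM_FLAG_HANDOFF	(1UL << 2)	/* placeholder bit for the sketch */

struct waiter {
	unsigned long timeout;
	bool handoff_set;
};

static long handle_timeout(unsigned long now, unsigned long oldcount,
			   struct waiter *waiter)
{
	long adjustment = 0;

	if (now > waiter->timeout) {		/* stands in for time_after() */
		if (!(oldcount & RWSEM_FLAG_HANDOFF))
			adjustment -= RWSEM_FLAG_HANDOFF;
		waiter->handoff_set = true;
	}
	return adjustment;
}

int main(void)
{
	struct waiter w = { .timeout = 10, .handoff_set = false };

	/* HANDOFF bit already set elsewhere: no count change, but the
	 * timed-out waiter is still recorded as the requester, which is
	 * why rwsem_down_read_slowpath() must initialize handoff_set. */
	long adj = handle_timeout(20, RWSEM_FLAG_HANDOFF, &w);
	printf("adjustment=%ld handoff_set=%d\n", adj, w.handoff_set);
	return 0;
}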