Commit cfafcd11 authored by Peter Zijlstra, committed by Thomas Gleixner

futex: Rework futex_lock_pi() to use rt_mutex_*_proxy_lock()

By changing futex_lock_pi() to use rt_mutex_*_proxy_lock() all wait_list
modifications are done under both hb->lock and wait_lock.

This closes the obvious interleave pattern between futex_lock_pi() and
futex_unlock_pi(), but not entirely so. See below:

Before:

futex_lock_pi()			futex_unlock_pi()
  unlock hb->lock

				  lock hb->lock
				  unlock hb->lock

				  lock rt_mutex->wait_lock
				  unlock rt_mutex->wait_lock
				    -EAGAIN

  lock rt_mutex->wait_lock
  list_add
  unlock rt_mutex->wait_lock

  schedule()

  lock rt_mutex->wait_lock
  list_del
  unlock rt_mutex->wait_lock

				  <idem>
				    -EAGAIN

  lock hb->lock


After:

futex_lock_pi()			futex_unlock_pi()

  lock hb->lock
  lock rt_mutex->wait_lock
  list_add
  unlock rt_mutex->wait_lock
  unlock hb->lock

  schedule()
				  lock hb->lock
				  unlock hb->lock
  lock hb->lock
  lock rt_mutex->wait_lock
  list_del
  unlock rt_mutex->wait_lock

				  lock rt_mutex->wait_lock
				  unlock rt_mutex->wait_lock
				    -EAGAIN

  unlock hb->lock


It does, however, solve the earlier starvation/live-lock scenario introduced
with the -EAGAIN retry: in the before scenario the -EAGAIN happens while
futex_unlock_pi() holds no locks at all, whereas in the after scenario it
happens while futex_unlock_pi() actually holds a lock, so the retry is
serialized on that lock.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: juri.lelli@arm.com
Cc: bigeasy@linutronix.de
Cc: xlpang@redhat.com
Cc: rostedt@goodmis.org
Cc: mathieu.desnoyers@efficios.com
Cc: jdesfossez@efficios.com
Cc: dvhart@infradead.org
Cc: bristot@redhat.com
Link: http://lkml.kernel.org/r/20170322104152.062785528@infradead.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Parent 38d589f2
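As a reading aid for the diff below: stripped of error handling and the
pi_state owner fixup, the reworked futex_lock_pi() slow path boils down to the
sequence sketched here. This is a paraphrase of the hunks that follow, not
literal kernel source; the comments map each step back to the "After" diagram
above.

    __queue_me(&q, hb);                       /* enqueue on the hash bucket, hb->lock stays held */

    rt_mutex_init_waiter(&rt_waiter);
    rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex,
                              &rt_waiter, current);   /* list_add under wait_lock, hb->lock held */

    spin_unlock(q.lock_ptr);                  /* unlock hb->lock */
    rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex,
                             to, &rt_waiter);         /* schedule() */

    spin_lock(q.lock_ptr);                    /* re-take hb->lock first ... */
    rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex,
                                &rt_waiter);          /* ... then list_del under wait_lock */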
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2099,20 +2099,7 @@ queue_unlock(struct futex_hash_bucket *hb)
 	hb_waiters_dec(hb);
 }
 
-/**
- * queue_me() - Enqueue the futex_q on the futex_hash_bucket
- * @q:	The futex_q to enqueue
- * @hb:	The destination hash bucket
- *
- * The hb->lock must be held by the caller, and is released here. A call to
- * queue_me() is typically paired with exactly one call to unqueue_me(). The
- * exceptions involve the PI related operations, which may use unqueue_me_pi()
- * or nothing if the unqueue is done as part of the wake process and the unqueue
- * state is implicit in the state of woken task (see futex_wait_requeue_pi() for
- * an example).
- */
-static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
-	__releases(&hb->lock)
+static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
 {
 	int prio;
 
@@ -2129,6 +2116,24 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
 	plist_node_init(&q->list, prio);
 	plist_add(&q->list, &hb->chain);
 	q->task = current;
+}
+
+/**
+ * queue_me() - Enqueue the futex_q on the futex_hash_bucket
+ * @q:	The futex_q to enqueue
+ * @hb:	The destination hash bucket
+ *
+ * The hb->lock must be held by the caller, and is released here. A call to
+ * queue_me() is typically paired with exactly one call to unqueue_me(). The
+ * exceptions involve the PI related operations, which may use unqueue_me_pi()
+ * or nothing if the unqueue is done as part of the wake process and the unqueue
+ * state is implicit in the state of woken task (see futex_wait_requeue_pi() for
+ * an example).
+ */
+static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
+	__releases(&hb->lock)
+{
+	__queue_me(q, hb);
 	spin_unlock(&hb->lock);
 }
@@ -2587,6 +2592,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
 {
 	struct hrtimer_sleeper timeout, *to = NULL;
 	struct futex_pi_state *pi_state = NULL;
+	struct rt_mutex_waiter rt_waiter;
 	struct futex_hash_bucket *hb;
 	struct futex_q q = futex_q_init;
 	int res, ret;
@@ -2639,24 +2645,51 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
 		}
 	}
 
+	WARN_ON(!q.pi_state);
+
 	/*
 	 * Only actually queue now that the atomic ops are done:
 	 */
-	queue_me(&q, hb);
+	__queue_me(&q, hb);
 
-	WARN_ON(!q.pi_state);
-	/*
-	 * Block on the PI mutex:
-	 */
-	if (!trylock) {
-		ret = rt_mutex_timed_futex_lock(&q.pi_state->pi_mutex, to);
-	} else {
+	if (trylock) {
 		ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex);
 		/* Fixup the trylock return value: */
 		ret = ret ? 0 : -EWOULDBLOCK;
+		goto no_block;
 	}
 
+	/*
+	 * We must add ourselves to the rt_mutex waitlist while holding hb->lock
+	 * such that the hb and rt_mutex wait lists match.
+	 */
+	rt_mutex_init_waiter(&rt_waiter);
+	ret = rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
+	if (ret) {
+		if (ret == 1)
+			ret = 0;
+
+		goto no_block;
+	}
+
+	spin_unlock(q.lock_ptr);
+
+	if (unlikely(to))
+		hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);
+
+	ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
+
 	spin_lock(q.lock_ptr);
+	/*
+	 * If we failed to acquire the lock (signal/timeout), we must
+	 * first acquire the hb->lock before removing the lock from the
+	 * rt_mutex waitqueue, such that we can keep the hb and rt_mutex
+	 * wait lists consistent.
+	 */
+	if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
+		ret = 0;
+
+no_block:
 	/*
 	 * Fixup the pi_state owner and possibly acquire the lock if we
 	 * haven't already.
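One detail that helps when matching the hunk above against the diagrams in the
commit message: q.lock_ptr is set up by queue_lock() to point at the hash
bucket lock, so spin_unlock(q.lock_ptr) and spin_lock(q.lock_ptr) are the
"unlock hb->lock" and "lock hb->lock" steps. Roughly, from queue_lock() (not
part of this diff):

    q->lock_ptr = &hb->lock;   /* the futex_q remembers which bucket lock protects it */
    spin_lock(&hb->lock);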
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1492,19 +1492,6 @@ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
 
-/*
- * Futex variant with full deadlock detection.
- * Futex variants must not use the fast-path, see __rt_mutex_futex_unlock().
- */
-int __sched rt_mutex_timed_futex_lock(struct rt_mutex *lock,
-			       struct hrtimer_sleeper *timeout)
-{
-	might_sleep();
-
-	return rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE,
-				 timeout, RT_MUTEX_FULL_CHAINWALK);
-}
-
 /*
  * Futex variant, must not use fastpath.
  */
@@ -1782,12 +1769,6 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
 	/* sleep on the mutex */
 	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
 
-	/*
-	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
-	 * have to fix that up.
-	 */
-	fixup_rt_mutex_waiters(lock);
-
 	raw_spin_unlock_irq(&lock->wait_lock);
 
 	return ret;
@@ -1827,6 +1808,13 @@ bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
 		fixup_rt_mutex_waiters(lock);
 		cleanup = true;
 	}
+
+	/*
+	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
+	 * have to fix that up.
+	 */
+	fixup_rt_mutex_waiters(lock);
+
 	raw_spin_unlock_irq(&lock->wait_lock);
 
 	return cleanup;
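The two rt_mutex_*_proxy_lock() hunks above move the unconditional waiter-bit
fixup out of rt_mutex_wait_proxy_lock() and into rt_mutex_cleanup_proxy_lock().
With the new calling convention the waiter is only taken off the wait list in
the cleanup step, after the caller has re-acquired hb->lock, so that is also
where fixup_rt_mutex_waiters() has to run. A sketch of the resulting cleanup
function, reconstructed from the hunk context above (comment wording is mine):

    bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
                                     struct rt_mutex_waiter *waiter)
    {
            bool cleanup = false;

            raw_spin_lock_irq(&lock->wait_lock);
            /* If we did not become the owner, take our waiter off the wait list. */
            if (rt_mutex_owner(lock) != current) {
                    remove_waiter(lock, waiter);
                    fixup_rt_mutex_waiters(lock);
                    cleanup = true;
            }
            /*
             * try_to_take_rt_mutex() sets the waiter bit unconditionally,
             * so clear it again now that our waiter is gone.
             */
            fixup_rt_mutex_waiters(lock);
            raw_spin_unlock_irq(&lock->wait_lock);

            return cleanup;
    }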
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -113,7 +113,6 @@ extern int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
 extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
 				 struct rt_mutex_waiter *waiter);
 
-extern int rt_mutex_timed_futex_lock(struct rt_mutex *l, struct hrtimer_sleeper *to);
 extern int rt_mutex_futex_trylock(struct rt_mutex *l);
 
 extern void rt_mutex_futex_unlock(struct rt_mutex *lock);