diff --git a/kernel/futex.c b/kernel/futex.c
index 6eab24764f2834ea82a8fc28f919fd308812cda2..21625cb3e865c4f8277b04f989e017439196aab3 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1493,11 +1493,11 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
  */
 static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state)
 {
-	u32 curval, newval;
 	struct rt_mutex_waiter *top_waiter;
 	struct task_struct *new_owner;
 	bool postunlock = false;
-	DEFINE_WAKE_Q(wake_q);
+	DEFINE_RT_WAKE_Q(wqh);
+	u32 curval, newval;
 	int ret = 0;
 
 	top_waiter = rt_mutex_top_waiter(&pi_state->pi_mutex);
@@ -1549,14 +1549,14 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_
 		 * not fail.
 		 */
 		pi_state_update_owner(pi_state, new_owner);
-		postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
+		postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wqh);
 	}
 
 out_unlock:
 	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
 
 	if (postunlock)
-		rt_mutex_postunlock(&wake_q);
+		rt_mutex_postunlock(&wqh);
 
 	return ret;
 }
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 35f7685d7460e79ca25a3bfc7256ec7545710734..5f0d0725ca32787c7527ccbba5a1356e8c5b9683 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1017,7 +1017,7 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
  *
  * Called with lock->wait_lock held and interrupts disabled.
  */
-static void __sched mark_wakeup_next_waiter(struct wake_q_head *wake_q,
+static void __sched mark_wakeup_next_waiter(struct rt_wake_q_head *wqh,
 					    struct rt_mutex_base *lock)
 {
 	struct rt_mutex_waiter *waiter;
@@ -1054,10 +1054,10 @@ static void __sched mark_wakeup_next_waiter(struct wake_q_head *wake_q,
 	 * deboost but before waking our donor task, hence the preempt_disable()
 	 * before unlock.
 	 *
-	 * Pairs with preempt_enable() in rt_mutex_postunlock();
+	 * Pairs with preempt_enable() in rt_mutex_wake_up_q();
 	 */
 	preempt_disable();
-	wake_q_add(wake_q, waiter->task);
+	rt_mutex_wake_q_add(wqh, waiter);
 	raw_spin_unlock(&current->pi_lock);
 }
 
@@ -1328,7 +1328,7 @@ static __always_inline int __rt_mutex_trylock(struct rt_mutex_base *lock)
  */
 static void __sched rt_mutex_slowunlock(struct rt_mutex_base *lock)
 {
-	DEFINE_WAKE_Q(wake_q);
+	DEFINE_RT_WAKE_Q(wqh);
 	unsigned long flags;
 
 	/* irqsave required to support early boot calls */
@@ -1381,10 +1381,10 @@ static void __sched rt_mutex_slowunlock(struct rt_mutex_base *lock)
 	 *
 	 * Queue the next waiter for wakeup once we release the wait_lock.
 	 */
-	mark_wakeup_next_waiter(&wake_q, lock);
+	mark_wakeup_next_waiter(&wqh, lock);
 	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
-	rt_mutex_postunlock(&wake_q);
+	rt_mutex_wake_up_q(&wqh);
 }
 
 static __always_inline void __rt_mutex_unlock(struct rt_mutex_base *lock)
diff --git a/kernel/locking/rtmutex_api.c b/kernel/locking/rtmutex_api.c
index c5136f4998bbb7f633ebdcefbda5ec0fbd468382..56403dc5c2fc1d9528f58e0d6b597e2b030731c9 100644
--- a/kernel/locking/rtmutex_api.c
+++ b/kernel/locking/rtmutex_api.c
@@ -137,10 +137,10 @@ int __sched __rt_mutex_futex_trylock(struct rt_mutex_base *lock)
  * do not use the fast-path, can be simple and will not need to retry.
 *
 * @lock:	The rt_mutex to be unlocked
- * @wake_q:	The wake queue head from which to get the next lock waiter
+ * @wqh:	The wake queue head from which to get the next lock waiter
  */
 bool __sched __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
-				     struct wake_q_head *wake_q)
+				     struct rt_wake_q_head *wqh)
 {
 	lockdep_assert_held(&lock->wait_lock);
 
@@ -157,23 +157,23 @@ bool __sched __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
 	 * avoid inversion prior to the wakeup. preempt_disable()
 	 * therein pairs with rt_mutex_postunlock().
 	 */
-	mark_wakeup_next_waiter(wake_q, lock);
+	mark_wakeup_next_waiter(wqh, lock);
 
 	return true; /* call postunlock() */
 }
 
 void __sched rt_mutex_futex_unlock(struct rt_mutex_base *lock)
 {
-	DEFINE_WAKE_Q(wake_q);
+	DEFINE_RT_WAKE_Q(wqh);
 	unsigned long flags;
 	bool postunlock;
 
 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
-	postunlock = __rt_mutex_futex_unlock(lock, &wake_q);
+	postunlock = __rt_mutex_futex_unlock(lock, &wqh);
 	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
 	if (postunlock)
-		rt_mutex_postunlock(&wake_q);
+		rt_mutex_postunlock(&wqh);
 }
 
 /**
@@ -441,12 +441,9 @@ void __sched rt_mutex_adjust_pi(struct task_struct *task)
 /*
  * Performs the wakeup of the top-waiter and re-enables preemption.
  */
-void __sched rt_mutex_postunlock(struct wake_q_head *wake_q)
+void __sched rt_mutex_postunlock(struct rt_wake_q_head *wqh)
 {
-	wake_up_q(wake_q);
-
-	/* Pairs with preempt_disable() in mark_wakeup_next_waiter() */
-	preempt_enable();
+	rt_mutex_wake_up_q(wqh);
 }
 
 #ifdef CONFIG_DEBUG_RT_MUTEXES
diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
index 9e2f1dbf6482916799b14a6d13604adec048eec7..ff36316003d87583df667e53bef747c951b9f417 100644
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -76,9 +76,9 @@ extern int __rt_mutex_futex_trylock(struct rt_mutex_base *l);
 
 extern void rt_mutex_futex_unlock(struct rt_mutex_base *lock);
 extern bool __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
-				    struct wake_q_head *wake_q);
+				    struct rt_wake_q_head *wqh);
 
-extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
+extern void rt_mutex_postunlock(struct rt_wake_q_head *wqh);
 
 /*
  * Must be guarded because this header is included from rcu/tree_plugin.h
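
The hunks above use four helpers that this diff itself does not define: struct rt_wake_q_head, DEFINE_RT_WAKE_Q(), rt_mutex_wake_q_add() and rt_mutex_wake_up_q(). They are introduced by a companion patch in the same series; the following is only a minimal sketch of their likely shape, assuming an rt_mutex_waiter::wake_state field that records the sleep state of rtlock waiters on PREEMPT_RT. The exact in-tree definitions may differ.

/*
 * Sketch only, not part of this diff. Relies on the regular wake_q
 * machinery from <linux/sched/wake_q.h> and on rt_mutex_waiter gaining
 * a wake_state member (an assumption based on the rest of the series).
 */
struct rt_wake_q_head {
	struct wake_q_head	head;		/* normal wake_q for TASK_NORMAL waiters */
	struct task_struct	*rtlock_task;	/* rtlock waiter sleeping in TASK_RTLOCK_WAIT */
};

#define DEFINE_RT_WAKE_Q(name)						\
	struct rt_wake_q_head name = {					\
		.head		= WAKE_Q_HEAD_INITIALIZER(name.head),	\
		.rtlock_task	= NULL,					\
	}

/* Queue a waiter; rtlock waiters on PREEMPT_RT are tracked separately. */
static __always_inline void rt_mutex_wake_q_add(struct rt_wake_q_head *wqh,
						struct rt_mutex_waiter *w)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT) && w->wake_state != TASK_NORMAL) {
		/* Hold a reference until the wakeup actually happens */
		get_task_struct(w->task);
		wqh->rtlock_task = w->task;
	} else {
		wake_q_add(&wqh->head, w->task);
	}
}

/* Wake everything queued on @wqh and re-enable preemption. */
void __sched rt_mutex_wake_up_q(struct rt_wake_q_head *wqh)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT) && wqh->rtlock_task) {
		wake_up_state(wqh->rtlock_task, TASK_RTLOCK_WAIT);
		put_task_struct(wqh->rtlock_task);
		wqh->rtlock_task = NULL;
	}

	if (!wake_q_empty(&wqh->head))
		wake_up_q(&wqh->head);

	/* Pairs with preempt_disable() in mark_wakeup_next_waiter() */
	preempt_enable();
}

Read this way, the rtmutex_api.c change is a straight consolidation: the wake_up_q() plus preempt_enable() pair that used to live in rt_mutex_postunlock() now sits in rt_mutex_wake_up_q(), which additionally knows how to wake an rtlock waiter out of TASK_RTLOCK_WAIT; hence the updated "Pairs with preempt_enable() in rt_mutex_wake_up_q()" comment in mark_wakeup_next_waiter().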