提交 36cf3b5c 编写于 作者: T Thomas Gleixner 提交者: Linus Torvalds

FUTEX: Tidy up the code

The recent PRIVATE and REQUEUE_PI changes to the futex code made it hard to
read.  Tidy it up.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
上级 0746aec3
......@@ -120,6 +120,24 @@ static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];
/* Futex-fs vfsmount entry: */
static struct vfsmount *futex_mnt;
/*
* Take mm->mmap_sem, when futex is shared
*/
static inline void futex_lock_mm(struct rw_semaphore *fshared)
{
if (fshared)
down_read(fshared);
}
/*
* Release mm->mmap_sem, when the futex is shared
*/
static inline void futex_unlock_mm(struct rw_semaphore *fshared)
{
if (fshared)
up_read(fshared);
}
/*
* We hash on the keys returned from get_futex_key (see below).
*/
......@@ -287,7 +305,18 @@ void drop_futex_key_refs(union futex_key *key)
}
EXPORT_SYMBOL_GPL(drop_futex_key_refs);
static inline int get_futex_value_locked(u32 *dest, u32 __user *from)
/*
 * Atomically compare-and-exchange the user-space futex word at @uaddr
 * from @uval to @newval, with page faults disabled (the caller holds a
 * hash-bucket spinlock, so we must not fault here).  Returns the value
 * found at @uaddr; callers compare the result against -EFAULT to detect
 * a fault.
 */
static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
{
	u32 ret;

	pagefault_disable();
	ret = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
	pagefault_enable();

	return ret;
}
static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
int ret;
......@@ -620,9 +649,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
newval = FUTEX_WAITERS | new_owner->pid;
pagefault_disable();
curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
pagefault_enable();
curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
if (curval == -EFAULT)
ret = -EFAULT;
......@@ -659,9 +686,7 @@ static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
* There is no waiter, so we unlock the futex. The owner died
* bit has not to be preserved here. We are the owner:
*/
pagefault_disable();
oldval = futex_atomic_cmpxchg_inatomic(uaddr, uval, 0);
pagefault_enable();
oldval = cmpxchg_futex_value_locked(uaddr, uval, 0);
if (oldval == -EFAULT)
return oldval;
......@@ -700,8 +725,7 @@ static int futex_wake(u32 __user *uaddr, struct rw_semaphore *fshared,
union futex_key key;
int ret;
if (fshared)
down_read(fshared);
futex_lock_mm(fshared);
ret = get_futex_key(uaddr, fshared, &key);
if (unlikely(ret != 0))
......@@ -725,8 +749,7 @@ static int futex_wake(u32 __user *uaddr, struct rw_semaphore *fshared,
spin_unlock(&hb->lock);
out:
if (fshared)
up_read(fshared);
futex_unlock_mm(fshared);
return ret;
}
......@@ -746,8 +769,7 @@ futex_wake_op(u32 __user *uaddr1, struct rw_semaphore *fshared,
int ret, op_ret, attempt = 0;
retryfull:
if (fshared)
down_read(fshared);
futex_lock_mm(fshared);
ret = get_futex_key(uaddr1, fshared, &key1);
if (unlikely(ret != 0))
......@@ -793,7 +815,7 @@ futex_wake_op(u32 __user *uaddr1, struct rw_semaphore *fshared,
*/
if (attempt++) {
ret = futex_handle_fault((unsigned long)uaddr2,
fshared, attempt);
fshared, attempt);
if (ret)
goto out;
goto retry;
......@@ -803,8 +825,7 @@ futex_wake_op(u32 __user *uaddr1, struct rw_semaphore *fshared,
* If we would have faulted, release mmap_sem,
* fault it in and start all over again.
*/
if (fshared)
up_read(fshared);
futex_unlock_mm(fshared);
ret = get_user(dummy, uaddr2);
if (ret)
......@@ -841,8 +862,8 @@ futex_wake_op(u32 __user *uaddr1, struct rw_semaphore *fshared,
if (hb1 != hb2)
spin_unlock(&hb2->lock);
out:
if (fshared)
up_read(fshared);
futex_unlock_mm(fshared);
return ret;
}
......@@ -861,8 +882,7 @@ static int futex_requeue(u32 __user *uaddr1, struct rw_semaphore *fshared,
int ret, drop_count = 0;
retry:
if (fshared)
down_read(fshared);
futex_lock_mm(fshared);
ret = get_futex_key(uaddr1, fshared, &key1);
if (unlikely(ret != 0))
......@@ -890,8 +910,7 @@ static int futex_requeue(u32 __user *uaddr1, struct rw_semaphore *fshared,
* If we would have faulted, release mmap_sem, fault
* it in and start all over again.
*/
if (fshared)
up_read(fshared);
futex_unlock_mm(fshared);
ret = get_user(curval, uaddr1);
......@@ -944,8 +963,7 @@ static int futex_requeue(u32 __user *uaddr1, struct rw_semaphore *fshared,
drop_futex_key_refs(&key1);
out:
if (fshared)
up_read(fshared);
futex_unlock_mm(fshared);
return ret;
}
......@@ -1113,10 +1131,7 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
while (!ret) {
newval = (uval & FUTEX_OWNER_DIED) | newtid;
pagefault_disable();
curval = futex_atomic_cmpxchg_inatomic(uaddr,
uval, newval);
pagefault_enable();
curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
if (curval == -EFAULT)
ret = -EFAULT;
......@@ -1134,6 +1149,7 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
#define ARG3_SHARED 1
static long futex_wait_restart(struct restart_block *restart);
static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
u32 val, ktime_t *abs_time)
{
......@@ -1148,8 +1164,7 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
q.pi_state = NULL;
retry:
if (fshared)
down_read(fshared);
futex_lock_mm(fshared);
ret = get_futex_key(uaddr, fshared, &q.key);
if (unlikely(ret != 0))
......@@ -1186,8 +1201,7 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
* If we would have faulted, release mmap_sem, fault it in and
* start all over again.
*/
if (fshared)
up_read(fshared);
futex_unlock_mm(fshared);
ret = get_user(uval, uaddr);
......@@ -1206,8 +1220,7 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
* Now the futex is queued and we have checked the data, we
* don't want to hold mmap_sem while we sleep.
*/
if (fshared)
up_read(fshared);
futex_unlock_mm(fshared);
/*
* There might have been scheduling since the queue_me(), as we
......@@ -1285,8 +1298,7 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
queue_unlock(&q, hb);
out_release_sem:
if (fshared)
up_read(fshared);
futex_unlock_mm(fshared);
return ret;
}
......@@ -1333,8 +1345,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
q.pi_state = NULL;
retry:
if (fshared)
down_read(fshared);
futex_lock_mm(fshared);
ret = get_futex_key(uaddr, fshared, &q.key);
if (unlikely(ret != 0))
......@@ -1353,9 +1364,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
*/
newval = current->pid;
pagefault_disable();
curval = futex_atomic_cmpxchg_inatomic(uaddr, 0, newval);
pagefault_enable();
curval = cmpxchg_futex_value_locked(uaddr, 0, newval);
if (unlikely(curval == -EFAULT))
goto uaddr_faulted;
......@@ -1398,9 +1407,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
lock_taken = 1;
}
pagefault_disable();
curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
pagefault_enable();
curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
if (unlikely(curval == -EFAULT))
goto uaddr_faulted;
......@@ -1428,8 +1435,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
* exit to complete.
*/
queue_unlock(&q, hb);
if (fshared)
up_read(fshared);
futex_unlock_mm(fshared);
cond_resched();
goto retry;
......@@ -1465,8 +1471,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
* Now the futex is queued and we have checked the data, we
* don't want to hold mmap_sem while we sleep.
*/
if (fshared)
up_read(fshared);
futex_unlock_mm(fshared);
WARN_ON(!q.pi_state);
/*
......@@ -1480,8 +1485,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
ret = ret ? 0 : -EWOULDBLOCK;
}
if (fshared)
down_read(fshared);
futex_lock_mm(fshared);
spin_lock(q.lock_ptr);
if (!ret) {
......@@ -1518,8 +1522,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
/* Unqueue and drop the lock */
unqueue_me_pi(&q);
if (fshared)
up_read(fshared);
futex_unlock_mm(fshared);
return ret != -EINTR ? ret : -ERESTARTNOINTR;
......@@ -1527,8 +1530,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
queue_unlock(&q, hb);
out_release_sem:
if (fshared)
up_read(fshared);
futex_unlock_mm(fshared);
return ret;
uaddr_faulted:
......@@ -1550,8 +1552,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
goto retry_unlocked;
}
if (fshared)
up_read(fshared);
futex_unlock_mm(fshared);
ret = get_user(uval, uaddr);
if (!ret && (uval != -EFAULT))
......@@ -1585,8 +1586,7 @@ static int futex_unlock_pi(u32 __user *uaddr, struct rw_semaphore *fshared)
/*
* First take all the futex related locks:
*/
if (fshared)
down_read(fshared);
futex_lock_mm(fshared);
ret = get_futex_key(uaddr, fshared, &key);
if (unlikely(ret != 0))
......@@ -1601,11 +1601,9 @@ static int futex_unlock_pi(u32 __user *uaddr, struct rw_semaphore *fshared)
* again. If it succeeds then we can return without waking
* anyone else up:
*/
if (!(uval & FUTEX_OWNER_DIED)) {
pagefault_disable();
uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0);
pagefault_enable();
}
if (!(uval & FUTEX_OWNER_DIED))
uval = cmpxchg_futex_value_locked(uaddr, current->pid, 0);
if (unlikely(uval == -EFAULT))
goto pi_faulted;
......@@ -1647,8 +1645,7 @@ static int futex_unlock_pi(u32 __user *uaddr, struct rw_semaphore *fshared)
out_unlock:
spin_unlock(&hb->lock);
out:
if (fshared)
up_read(fshared);
futex_unlock_mm(fshared);
return ret;
......@@ -1671,8 +1668,7 @@ static int futex_unlock_pi(u32 __user *uaddr, struct rw_semaphore *fshared)
goto retry_unlocked;
}
if (fshared)
up_read(fshared);
futex_unlock_mm(fshared);
ret = get_user(uval, uaddr);
if (!ret && (uval != -EFAULT))
......@@ -1729,8 +1725,8 @@ static int futex_fd(u32 __user *uaddr, int signal)
if (printk_timed_ratelimit(&printk_interval, 60 * 60 * 1000)) {
printk(KERN_WARNING "Process `%s' used FUTEX_FD, which "
"will be removed from the kernel in June 2007\n",
current->comm);
"will be removed from the kernel in June 2007\n",
current->comm);
}
ret = -EINVAL;
......@@ -1908,10 +1904,8 @@ int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
* Wake robust non-PI futexes here. The wakeup of
* PI futexes happens in exit_pi_state():
*/
if (!pi) {
if (uval & FUTEX_WAITERS)
if (!pi && (uval & FUTEX_WAITERS))
futex_wake(uaddr, &curr->mm->mmap_sem, 1);
}
}
return 0;
}
......
......@@ -29,12 +29,6 @@
#include "rtmutex_common.h"
#ifdef CONFIG_DEBUG_RT_MUTEXES
# include "rtmutex-debug.h"
#else
# include "rtmutex.h"
#endif
# define TRACE_WARN_ON(x) WARN_ON(x)
# define TRACE_BUG_ON(x) BUG_ON(x)
......
......@@ -17,12 +17,6 @@
#include "rtmutex_common.h"
#ifdef CONFIG_DEBUG_RT_MUTEXES
# include "rtmutex-debug.h"
#else
# include "rtmutex.h"
#endif
/*
* lock->owner state tracking:
*
......
......@@ -103,7 +103,7 @@ static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
static inline struct task_struct *rt_mutex_real_owner(struct rt_mutex *lock)
{
return (struct task_struct *)
return (struct task_struct *)
((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}
......@@ -120,4 +120,11 @@ extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
struct task_struct *proxy_owner);
extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
struct task_struct *proxy_owner);
#ifdef CONFIG_DEBUG_RT_MUTEXES
# include "rtmutex-debug.h"
#else
# include "rtmutex.h"
#endif
#endif
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册