Commit c87e2837 authored by Ingo Molnar, committed by Linus Torvalds

[PATCH] pi-futex: futex_lock_pi/futex_unlock_pi support

This adds the actual pi-futex implementation, based on rt-mutexes.

[dino@in.ibm.com: fix an oops-causing race]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Dinakar Guniguntala <dino@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Parent 0cdbee99
include/linux/futex.h
@@ -12,6 +12,9 @@
 #define FUTEX_REQUEUE		3
 #define FUTEX_CMP_REQUEUE	4
 #define FUTEX_WAKE_OP		5
+#define FUTEX_LOCK_PI		6
+#define FUTEX_UNLOCK_PI		7
+#define FUTEX_TRYLOCK_PI	8
 
 /*
  * Support for robust futexes: the kernel cleans up held futexes at
@@ -97,10 +100,14 @@ extern int handle_futex_death(u32 __user *uaddr, struct task_struct *curr);
 
 #ifdef CONFIG_FUTEX
 extern void exit_robust_list(struct task_struct *curr);
+extern void exit_pi_state_list(struct task_struct *curr);
 #else
 static inline void exit_robust_list(struct task_struct *curr)
 {
 }
+static inline void exit_pi_state_list(struct task_struct *curr)
+{
+}
 #endif
 
 #define FUTEX_OP_SET		0	/* *(int *)UADDR2 = OPARG; */
...
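The three new opcodes are the kernel half of a user-space priority-inheriting lock: the futex word holds the owner's TID, so the uncontended lock and unlock are each a single atomic compare-and-swap in user space, and the kernel (and the rt-mutex machinery this patch builds on) is entered only on contention. A minimal, illustrative user-space sketch of that protocol follows; pi_lock/pi_unlock are made-up names, and error handling plus the FUTEX_WAITERS/FUTEX_OWNER_DIED bits are omitted for brevity.

/*
 * Minimal user-space sketch (illustrative, not part of this patch) of
 * the lock protocol behind FUTEX_LOCK_PI/FUTEX_UNLOCK_PI: the futex
 * word holds the owner's TID, the uncontended paths are plain atomic
 * compare-and-swaps, and the kernel is entered only on contention.
 */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdatomic.h>

static _Atomic unsigned int lock_word;	/* 0 = unlocked, otherwise owner TID */

static long sys_futex(void *uaddr, int op, unsigned int val, void *timeout)
{
	return syscall(SYS_futex, uaddr, op, val, timeout, NULL, 0);
}

static void pi_lock(void)
{
	unsigned int zero = 0;
	unsigned int tid = (unsigned int) syscall(SYS_gettid);

	/* Fast path: write our TID into the free lock word. */
	if (atomic_compare_exchange_strong(&lock_word, &zero, tid))
		return;
	/* Slow path: the kernel queues us on the rt-mutex backing this
	 * futex and priority-boosts the current owner until it releases. */
	sys_futex(&lock_word, FUTEX_LOCK_PI, 0, NULL);
}

static void pi_unlock(void)
{
	unsigned int tid = (unsigned int) syscall(SYS_gettid);

	/* Fast path: no waiters were recorded, just clear the word. */
	if (atomic_compare_exchange_strong(&lock_word, &tid, 0))
		return;
	/* Slow path: let the kernel hand the lock to the top-priority
	 * waiter and rewrite the futex word with the new owner's TID. */
	sys_futex(&lock_word, FUTEX_UNLOCK_PI, 0, NULL);
}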
include/linux/sched.h
@@ -84,6 +84,7 @@ struct sched_param {
 #include <asm/processor.h>
 
 struct exec_domain;
+struct futex_pi_state;
 
 /*
  * List of flags we want to share for kernel threads,
@@ -915,6 +916,8 @@ struct task_struct {
 #ifdef CONFIG_COMPAT
 	struct compat_robust_list_head __user *compat_robust_list;
 #endif
+	struct list_head pi_state_list;
+	struct futex_pi_state *pi_state_cache;
 
 	atomic_t fs_excl;	/* holding fs exclusive resources */
 	struct rcu_head rcu;
...
kernel/exit.c
@@ -925,6 +925,14 @@ fastcall NORET_TYPE void do_exit(long code)
 	mpol_free(tsk->mempolicy);
 	tsk->mempolicy = NULL;
 #endif
+	/*
+	 * This must happen late, after the PID is not
+	 * hashed anymore:
+	 */
+	if (unlikely(!list_empty(&tsk->pi_state_list)))
+		exit_pi_state_list(tsk);
+	if (unlikely(current->pi_state_cache))
+		kfree(current->pi_state_cache);
 	/*
 	 * If DEBUG_MUTEXES is on, make sure we are holding no locks:
 	 */
...
kernel/fork.c
@@ -1092,6 +1092,9 @@ static task_t *copy_process(unsigned long clone_flags,
 #ifdef CONFIG_COMPAT
 	p->compat_robust_list = NULL;
 #endif
+	INIT_LIST_HEAD(&p->pi_state_list);
+	p->pi_state_cache = NULL;
+
 	/*
 	 * sigaltstack should be cleared when sharing the same VM
 	 */
...
kernel/futex.c
(This diff is collapsed.)
kernel/futex_compat.c
@@ -129,14 +129,19 @@ asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
 	unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
 	int val2 = 0;
 
-	if (utime && (op == FUTEX_WAIT)) {
+	if (utime && (op == FUTEX_WAIT || op == FUTEX_LOCK_PI)) {
 		if (get_compat_timespec(&t, utime))
 			return -EFAULT;
 		if (!timespec_valid(&t))
 			return -EINVAL;
-		timeout = timespec_to_jiffies(&t) + 1;
+		if (op == FUTEX_WAIT)
+			timeout = timespec_to_jiffies(&t) + 1;
+		else {
+			timeout = t.tv_sec;
+			val2 = t.tv_nsec;
+		}
 	}
-	if (op >= FUTEX_REQUEUE)
+	if (op == FUTEX_REQUEUE || op == FUTEX_CMP_REQUEUE)
 		val2 = (int) (unsigned long) utime;
 
 	return do_futex(uaddr, op, val, timeout, uaddr2, val2, val3);
...
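For comparison with the conversion above: FUTEX_WAIT takes a relative timeout (hence timespec_to_jiffies()), while the new FUTEX_LOCK_PI path passes tv_sec/tv_nsec straight through in timeout/val2, since the PI code interprets them as an absolute CLOCK_REALTIME deadline. A hedged, native-ABI caller sketch (pi_lock_timed is an illustrative name, not part of this patch):

/*
 * Illustrative caller (not part of this patch): a timed PI-lock attempt.
 * The deadline is absolute CLOCK_REALTIME time, which is why the compat
 * code above cannot simply convert it to a relative jiffies count.
 */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

/* Try to take the PI lock at *uaddr, giving up rel_sec seconds from now.
 * Returns 0 on success, -1 with errno set (e.g. ETIMEDOUT) on failure. */
static int pi_lock_timed(unsigned int *uaddr, time_t rel_sec)
{
	struct timespec deadline;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += rel_sec;	/* absolute deadline */

	/* The val argument is not a wait value for FUTEX_LOCK_PI; 0 is fine here. */
	return (int) syscall(SYS_futex, uaddr, FUTEX_LOCK_PI, 0,
			     &deadline, NULL, 0);
}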
kernel/rtmutex_common.h
@@ -112,4 +112,12 @@ static inline unsigned long rt_mutex_owner_pending(struct rt_mutex *lock)
 	return (unsigned long)lock->owner & RT_MUTEX_OWNER_PENDING;
 }
 
+/*
+ * PI-futex support (proxy locking functions, etc.):
+ */
+extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
+extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
+				       struct task_struct *proxy_owner);
+extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
+				  struct task_struct *proxy_owner);
 #endif