Commit 6b3934ef authored by Oleg Nesterov, committed by Linus Torvalds

[PATCH] copy_process: cleanup bad_fork_cleanup_signal

__exit_signal() does important cleanups atomically under ->siglock.  It is
also called from copy_process's error path.  This is not good; for example, it
is the reason we can't move __unhash_process() under ->siglock.

We should not mix these two paths; just look at the ugly 'if (p->sighand)' under
the 'bad_fork_cleanup_sighand:' label.  For the copy_process() case it is
sufficient to simply back out copy_signal(), nothing more.

Again, nobody can see this task yet.  For the CLONE_THREAD case we just
decrement signal->count; otherwise nobody can see this ->signal, so we can free
it locklessly.

This patch assumes it is safe to do exit_thread_group_keys() without
tasklist_lock.
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Acked-by: David Howells <dhowells@redhat.com>
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Parent 7001510d
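For orientation before the diff: a minimal sketch of the backout the message describes,
mirroring the cleanup_signal()/__cleanup_signal() helpers that the patch adds to
kernel/fork.c below (same names and fields as in the patch; the comments are explanatory
and not part of the commit).  The new task is not yet visible to anyone, so no locking
is needed:

void __cleanup_signal(struct signal_struct *sig)
{
	/* the commit assumes this is safe without tasklist_lock */
	exit_thread_group_keys(sig);
	kmem_cache_free(signal_cachep, sig);
}

static inline void cleanup_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;

	/* undo copy_signal(): drop the fork-time references */
	atomic_dec(&sig->live);
	if (atomic_dec_and_test(&sig->count))
		__cleanup_signal(sig);	/* last user (non-CLONE_THREAD): free locklessly */
}
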
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1149,7 +1149,7 @@ extern void flush_thread(void);
 extern void exit_thread(void);
 extern void exit_files(struct task_struct *);
-extern void exit_signal(struct task_struct *);
+extern void __cleanup_signal(struct signal_struct *);
 extern void __exit_signal(struct task_struct *);
 extern void __exit_sighand(struct task_struct *);
 extern void exit_itimers(struct signal_struct *);
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -210,7 +210,6 @@ extern kmem_cache_t *names_cachep;
 extern kmem_cache_t *files_cachep;
 extern kmem_cache_t *filp_cachep;
 extern kmem_cache_t *fs_cachep;
-extern kmem_cache_t *signal_cachep;
 extern kmem_cache_t *sighand_cachep;
 extern kmem_cache_t *bio_cachep;
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -84,7 +84,7 @@ static kmem_cache_t *task_struct_cachep;
 #endif
 
 /* SLAB cache for signal_struct structures (tsk->signal) */
-kmem_cache_t *signal_cachep;
+static kmem_cache_t *signal_cachep;
 
 /* SLAB cache for sighand_struct structures (tsk->sighand) */
 kmem_cache_t *sighand_cachep;
@@ -872,6 +872,22 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts
 	return 0;
 }
 
+void __cleanup_signal(struct signal_struct *sig)
+{
+	exit_thread_group_keys(sig);
+	kmem_cache_free(signal_cachep, sig);
+}
+
+static inline void cleanup_signal(struct task_struct *tsk)
+{
+	struct signal_struct *sig = tsk->signal;
+
+	atomic_dec(&sig->live);
+
+	if (atomic_dec_and_test(&sig->count))
+		__cleanup_signal(sig);
+}
+
 static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
 {
 	unsigned long new_flags = p->flags;
@@ -1206,10 +1222,9 @@ static task_t *copy_process(unsigned long clone_flags,
 	if (p->mm)
 		mmput(p->mm);
 bad_fork_cleanup_signal:
-	exit_signal(p);
+	cleanup_signal(p);
 bad_fork_cleanup_sighand:
-	if (p->sighand)
-		__exit_sighand(p);
+	__exit_sighand(p);
 bad_fork_cleanup_fs:
 	exit_fs(p); /* blocking */
 bad_fork_cleanup_files:
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -395,23 +395,10 @@ void __exit_signal(struct task_struct *tsk)
 	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
 	flush_sigqueue(&tsk->pending);
 	if (sig) {
-		/*
-		 * We are cleaning up the signal_struct here.
-		 */
-		exit_thread_group_keys(sig);
-		kmem_cache_free(signal_cachep, sig);
+		__cleanup_signal(sig);
 	}
 }
 
-void exit_signal(struct task_struct *tsk)
-{
-	atomic_dec(&tsk->signal->live);
-
-	write_lock_irq(&tasklist_lock);
-	__exit_signal(tsk);
-	write_unlock_irq(&tasklist_lock);
-}
-
 /*
  * Flush all handlers for a task.
  */