Commit 999d9fc1 authored by Oleg Nesterov, committed by Linus Torvalds

coredump: move mm->core_waiters into struct core_state

Move mm->core_waiters into "struct core_state" allocated on stack.  This
shrinks mm_struct a little bit and allows further changes.

This patch mostly does s/core_waiters/core_state.  The only essential
change is that coredump_wait() must clear mm->core_state before return.

coredump_wait()'s path is uglified and .text grows by 30 bytes; this is fixed by the next patch.

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Roland McGrath <roland@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 32ecb1f2
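
For orientation before the diff: a minimal C sketch of the data-structure change, condensed from the hunks below. Unrelated members are omitted, and the "_before"/"_after" suffixes are only illustrative; they do not exist in the kernel sources.

    /* Sketch only: unrelated members omitted, *_before/*_after names are illustrative. */

    struct core_state_before {              /* as introduced by the parent commit */
            struct completion startup;
    };

    struct core_state_after {               /* after this patch */
            int nr_threads;                 /* moved here from mm->core_waiters */
            struct completion startup;
    };

    struct mm_struct {                      /* only the members relevant here */
            /* int core_waiters; */         /* removed by this patch */
            struct core_state *core_state;  /* non-NULL while a coredump is in progress */
            /* ... */
    };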
@@ -722,12 +722,10 @@ static int exec_mmap(struct mm_struct *mm)
 	 * Make sure that if there is a core dump in progress
 	 * for the old mm, we get out and die instead of going
 	 * through with the exec. We must hold mmap_sem around
-	 * checking core_waiters and changing tsk->mm. The
-	 * core-inducing thread will increment core_waiters for
-	 * each thread whose ->mm == old_mm.
+	 * checking core_state and changing tsk->mm.
 	 */
 	down_read(&old_mm->mmap_sem);
-	if (unlikely(old_mm->core_waiters)) {
+	if (unlikely(old_mm->core_state)) {
 		up_read(&old_mm->mmap_sem);
 		return -EINTR;
 	}
@@ -1514,7 +1512,7 @@ static void zap_process(struct task_struct *start)
 	t = start;
 	do {
 		if (t != current && t->mm) {
-			t->mm->core_waiters++;
+			t->mm->core_state->nr_threads++;
 			sigaddset(&t->pending.signal, SIGKILL);
 			signal_wake_up(t, 1);
 		}
@@ -1538,11 +1536,11 @@ static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
 	if (err)
 		return err;
-	if (atomic_read(&mm->mm_users) == mm->core_waiters + 1)
+	if (atomic_read(&mm->mm_users) == mm->core_state->nr_threads + 1)
 		goto done;
 	/*
 	 * We should find and kill all tasks which use this mm, and we should
-	 * count them correctly into mm->core_waiters. We don't take tasklist
+	 * count them correctly into ->nr_threads. We don't take tasklist
 	 * lock, but this is safe wrt:
 	 *
 	 * fork:
@@ -1590,7 +1588,7 @@ static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
 	}
 	rcu_read_unlock();
 done:
-	return mm->core_waiters;
+	return mm->core_state->nr_threads;
 }
 
 static int coredump_wait(int exit_code)
@@ -1603,9 +1601,12 @@ static int coredump_wait(int exit_code)
 	init_completion(&mm->core_done);
 	init_completion(&core_state.startup);
+	core_state.nr_threads = 0;
 	mm->core_state = &core_state;
 	core_waiters = zap_threads(tsk, mm, exit_code);
+	if (core_waiters < 0)
+		mm->core_state = NULL;
 	up_write(&mm->mmap_sem);
 	if (unlikely(core_waiters < 0))
@@ -1623,8 +1624,8 @@ static int coredump_wait(int exit_code)
 	if (core_waiters)
 		wait_for_completion(&core_state.startup);
+	mm->core_state = NULL;
 fail:
-	BUG_ON(mm->core_waiters);
 	return core_waiters;
 }
@@ -1702,7 +1703,7 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
 	/*
 	 * If another thread got here first, or we are not dumpable, bail out.
 	 */
-	if (mm->core_waiters || !get_dumpable(mm)) {
+	if (mm->core_state || !get_dumpable(mm)) {
 		up_write(&mm->mmap_sem);
 		goto fail;
 	}
...
@@ -160,6 +160,7 @@ struct vm_area_struct {
 };
 
 struct core_state {
+	int nr_threads;
 	struct completion startup;
 };
@@ -179,7 +180,6 @@ struct mm_struct {
 	atomic_t mm_users;			/* How many users with user space? */
 	atomic_t mm_count;			/* How many references to "struct mm_struct" (users count as 1) */
 	int map_count;				/* number of VMAs */
-	int core_waiters;
 	struct rw_semaphore mmap_sem;
 	spinlock_t page_table_lock;		/* Protects page tables and some counters */
...
@@ -670,16 +670,16 @@ static void exit_mm(struct task_struct * tsk)
 		return;
 	/*
 	 * Serialize with any possible pending coredump.
-	 * We must hold mmap_sem around checking core_waiters
+	 * We must hold mmap_sem around checking core_state
 	 * and clearing tsk->mm. The core-inducing thread
-	 * will increment core_waiters for each thread in the
+	 * will increment ->nr_threads for each thread in the
 	 * group with ->mm != NULL.
 	 */
 	down_read(&mm->mmap_sem);
-	if (mm->core_waiters) {
+	if (mm->core_state) {
 		up_read(&mm->mmap_sem);
 		down_write(&mm->mmap_sem);
-		if (!--mm->core_waiters)
+		if (!--mm->core_state->nr_threads)
 			complete(&mm->core_state->startup);
 		up_write(&mm->mmap_sem);
...
@@ -400,7 +400,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
 	INIT_LIST_HEAD(&mm->mmlist);
 	mm->flags = (current->mm) ? current->mm->flags
 				  : MMF_DUMP_FILTER_DEFAULT;
-	mm->core_waiters = 0;
+	mm->core_state = NULL;
 	mm->nr_ptes = 0;
 	set_mm_counter(mm, file_rss, 0);
 	set_mm_counter(mm, anon_rss, 0);
...
@@ -1480,10 +1480,10 @@ static inline int may_ptrace_stop(void)
 	 * is a deadlock situation, and pointless because our tracer
 	 * is dead so don't allow us to stop.
 	 * If SIGKILL was already sent before the caller unlocked
-	 * ->siglock we must see ->core_waiters != 0. Otherwise it
+	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
 	 */
-	if (unlikely(current->mm->core_waiters) &&
+	if (unlikely(current->mm->core_state) &&
 	    unlikely(current->mm == current->parent->mm))
 		return 0;
...
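
Putting the coredump_wait() hunks above together, the new lifetime of the on-stack core_state can be summarized by the condensed sketch below. It is assembled only from the visible hunks; lines the page elides between hunks are reduced to a comment, so this is an illustration rather than the verbatim function.

    /* Condensed sketch of coredump_wait() after this patch.  It is entered with
     * mm->mmap_sem held for write and performs the up_write itself, as the
     * hunks show. */
    static int coredump_wait(int exit_code)
    {
            struct task_struct *tsk = current;
            struct mm_struct *mm = tsk->mm;
            struct core_state core_state;           /* lives on the dumper's stack */
            int core_waiters;

            init_completion(&mm->core_done);
            init_completion(&core_state.startup);
            core_state.nr_threads = 0;
            mm->core_state = &core_state;           /* "dump in progress" marker seen by exec_mmap()/exit_mm() */

            core_waiters = zap_threads(tsk, mm, exit_code);
            if (core_waiters < 0)                   /* no sub-thread will ever complete ->startup */
                    mm->core_state = NULL;
            up_write(&mm->mmap_sem);

            if (unlikely(core_waiters < 0))
                    goto fail;
            /* ... lines not shown on this page ... */
            if (core_waiters)
                    wait_for_completion(&core_state.startup);
            mm->core_state = NULL;                  /* core_state is about to go out of scope */
    fail:
            return core_waiters;
    }

Because core_state now lives on coredump_wait()'s stack frame, every return path has to leave mm->core_state == NULL before that frame disappears; that is the "only essential change" mentioned in the commit message.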