Commit 16e95196 authored by Johannes Weiner, committed by Linus Torvalds

mm: oom_kill: clean up victim marking and exiting interfaces

Rename unmark_oom_victim() to exit_oom_victim().  Marking and unmarking
are related in functionality, but the interface is not symmetrical at
all: one is an internal OOM killer function used during the killing, the
other is for an OOM victim to signal its own death on exit later on.
This has locking implications, see follow-up changes.

While at it, rename mark_tsk_oom_victim() to mark_oom_victim(), which
is easier on the eye.
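
For illustration only, a minimal sketch of the intended pairing after this rename (hypothetical helper names; simplified stand-ins for the real call sites, oom_kill_process() and exit_mm()):

	#include <linux/oom.h>
	#include <linux/sched.h>

	/* Sketch: killer side -- mark the chosen victim, then deliver SIGKILL.
	 * Callers are expected to hold oom_sem for read (see the patched comment). */
	static void kill_oom_victim(struct task_struct *victim)
	{
		mark_oom_victim(victim);	/* sets TIF_MEMDIE, granting access to reserves */
		send_sig(SIGKILL, victim, 0);
	}

	/* Sketch: victim side -- on exit, the task notes its own death. */
	static void oom_victim_exit(void)
	{
		if (test_thread_flag(TIF_MEMDIE))
			exit_oom_victim();	/* clears TIF_MEMDIE */
	}
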
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 3f5ab8cf
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -165,7 +165,7 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
 		 * infrastructure. There is no real reason why the selected
 		 * task should have access to the memory reserves.
 		 */
-		mark_tsk_oom_victim(selected);
+		mark_oom_victim(selected);
 		send_sig(SIGKILL, selected, 0);
 		rem += selected_tasksize;
 	}
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -47,9 +47,7 @@ static inline bool oom_task_origin(const struct task_struct *p)
 	return !!(p->signal->oom_flags & OOM_FLAG_ORIGIN);
 }
 
-extern void mark_tsk_oom_victim(struct task_struct *tsk);
-
-extern void unmark_oom_victim(void);
+extern void mark_oom_victim(struct task_struct *tsk);
 
 extern unsigned long oom_badness(struct task_struct *p,
 		struct mem_cgroup *memcg, const nodemask_t *nodemask,
@@ -75,6 +73,9 @@ extern enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
 extern bool out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
 		int order, nodemask_t *mask, bool force_kill);
 
+extern void exit_oom_victim(void);
+
 extern int register_oom_notifier(struct notifier_block *nb);
 extern int unregister_oom_notifier(struct notifier_block *nb);
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -436,7 +436,7 @@ static void exit_mm(struct task_struct *tsk)
 	mm_update_next_owner(mm);
 	mmput(mm);
 	if (test_thread_flag(TIF_MEMDIE))
-		unmark_oom_victim();
+		exit_oom_victim();
 }
 
 static struct task_struct *find_alive_thread(struct task_struct *p)
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1536,7 +1536,7 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
 	 * quickly exit and free its memory.
 	 */
 	if (fatal_signal_pending(current) || task_will_free_mem(current)) {
-		mark_tsk_oom_victim(current);
+		mark_oom_victim(current);
 		return;
 	}
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -408,13 +408,13 @@ bool oom_killer_disabled __read_mostly;
 static DECLARE_RWSEM(oom_sem);
 
 /**
- * mark_tsk_oom_victim - marks the given task as OOM victim.
+ * mark_oom_victim - mark the given task as OOM victim
  * @tsk: task to mark
  *
  * Has to be called with oom_sem taken for read and never after
  * oom has been disabled already.
  */
-void mark_tsk_oom_victim(struct task_struct *tsk)
+void mark_oom_victim(struct task_struct *tsk)
 {
 	WARN_ON(oom_killer_disabled);
 	/* OOM killer might race with memcg OOM */
@@ -431,11 +431,9 @@ void mark_tsk_oom_victim(struct task_struct *tsk)
 }
 
 /**
- * unmark_oom_victim - unmarks the current task as OOM victim.
- *
- * Wakes up all waiters in oom_killer_disable()
+ * exit_oom_victim - note the exit of an OOM victim
  */
-void unmark_oom_victim(void)
+void exit_oom_victim(void)
 {
 	if (!test_and_clear_thread_flag(TIF_MEMDIE))
 		return;
@@ -515,7 +513,7 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 	 */
 	task_lock(p);
 	if (p->mm && task_will_free_mem(p)) {
-		mark_tsk_oom_victim(p);
+		mark_oom_victim(p);
 		task_unlock(p);
 		put_task_struct(p);
 		return;
@@ -570,7 +568,7 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 
 	/* mm cannot safely be dereferenced after task_unlock(victim) */
 	mm = victim->mm;
-	mark_tsk_oom_victim(victim);
+	mark_oom_victim(victim);
 	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
 		task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
 		K(get_mm_counter(victim->mm, MM_ANONPAGES)),
@@ -728,7 +726,7 @@ static void __out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
 	 */
 	if (current->mm &&
 	    (fatal_signal_pending(current) || task_will_free_mem(current))) {
-		mark_tsk_oom_victim(current);
+		mark_oom_victim(current);
 		return;
 	}