提交 9b0f8b04 编写于 作者: C Christoph Lameter 提交者: Linus Torvalds

[PATCH] Terminate process that fails on a constrained allocation

Some allocations are restricted to a limited set of nodes (due to memory
policies or cpuset constraints).  If the page allocator is not able to find
enough memory then that does not mean that overall system memory is low.

In particular going postal and more or less randomly shooting at processes
is not likely going to help the situation but may just lead to suicide (the
whole system coming down).

It is better to signal to the process that no memory exists given the
constraints that the process (or the configuration of the process) has
placed on the allocation behavior.  The process may be killed but then the
sysadmin or developer can investigate the situation.  The solution is
similar to what we do when running out of hugepages.

This patch adds a check before we kill processes.  At that point
performance considerations do not matter much so we just scan the zonelist
and reconstruct a list of nodes.  If the list of nodes does not contain all
online nodes then this is a constrained allocation and we should kill the
current process.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
上级 9827b781
...@@ -243,7 +243,7 @@ static struct sysrq_key_op sysrq_term_op = { ...@@ -243,7 +243,7 @@ static struct sysrq_key_op sysrq_term_op = {
static void moom_callback(void *ignored) static void moom_callback(void *ignored)
{ {
out_of_memory(GFP_KERNEL, 0); out_of_memory(&NODE_DATA(0)->node_zonelists[ZONE_NORMAL], GFP_KERNEL, 0);
} }
static DECLARE_WORK(moom_work, moom_callback, NULL); static DECLARE_WORK(moom_work, moom_callback, NULL);
......
...@@ -147,7 +147,7 @@ struct swap_list_t { ...@@ -147,7 +147,7 @@ struct swap_list_t {
#define vm_swap_full() (nr_swap_pages*2 < total_swap_pages) #define vm_swap_full() (nr_swap_pages*2 < total_swap_pages)
/* linux/mm/oom_kill.c */ /* linux/mm/oom_kill.c */
extern void out_of_memory(gfp_t gfp_mask, int order); extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order);
/* linux/mm/memory.c */ /* linux/mm/memory.c */
extern void swapin_readahead(swp_entry_t, unsigned long, struct vm_area_struct *); extern void swapin_readahead(swp_entry_t, unsigned long, struct vm_area_struct *);
......
...@@ -132,6 +132,36 @@ unsigned long badness(struct task_struct *p, unsigned long uptime) ...@@ -132,6 +132,36 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
return points; return points;
} }
/*
 * Types of limitations to the nodes from which allocations may occur.
 * Returned by constrained_alloc() so that out_of_memory() can decide
 * whether a failed allocation reflects a system-wide shortage
 * (CONSTRAINT_NONE) or only a restriction placed on the current
 * process by its memory policy or cpuset.
 */
#define CONSTRAINT_NONE 1
#define CONSTRAINT_MEMORY_POLICY 2
#define CONSTRAINT_CPUSET 3
/*
 * Determine the type of allocation constraint for a failed allocation.
 *
 * @zonelist: the zonelist the failed allocation was attempted from
 * @gfp_mask: allocation flags, forwarded to the cpuset check
 *
 * Walk the zonelist and clear each allowed node from a copy of the
 * online-node map.  Two constrained cases are detected:
 *  - a zone in the list that the cpuset forbids -> CONSTRAINT_CPUSET;
 *  - online nodes left over that the zonelist never covered, i.e. the
 *    memory policy (MPOL_BIND) excluded them -> CONSTRAINT_MEMORY_POLICY.
 * Otherwise every online node was reachable and the allocation was
 * unconstrained.  On !CONFIG_NUMA there is a single node, so the
 * answer is always CONSTRAINT_NONE.
 *
 * Called from the OOM path only, so the O(zones) scan is acceptable
 * (performance does not matter at this point).
 */
static inline int constrained_alloc(struct zonelist *zonelist, gfp_t gfp_mask)
{
#ifdef CONFIG_NUMA
	struct zone **z;
	nodemask_t nodes = node_online_map;	/* start with all online nodes */

	for (z = zonelist->zones; *z; z++)
		if (cpuset_zone_allowed(*z, gfp_mask))
			/* node reachable: remove it from the leftover set */
			node_clear((*z)->zone_pgdat->node_id,
					nodes);
		else
			/* a zone the cpuset forbids -> cpuset-constrained */
			return CONSTRAINT_CPUSET;

	if (!nodes_empty(nodes))
		/* some online nodes were never in the zonelist at all */
		return CONSTRAINT_MEMORY_POLICY;
#endif
	return CONSTRAINT_NONE;
}
/* /*
* Simple selection loop. We chose the process with the highest * Simple selection loop. We chose the process with the highest
* number of 'points'. We expect the caller will lock the tasklist. * number of 'points'. We expect the caller will lock the tasklist.
...@@ -184,7 +214,7 @@ static struct task_struct *select_bad_process(unsigned long *ppoints) ...@@ -184,7 +214,7 @@ static struct task_struct *select_bad_process(unsigned long *ppoints)
* CAP_SYS_RAW_IO set, send SIGTERM instead (but it's unlikely that * CAP_SYS_RAW_IO set, send SIGTERM instead (but it's unlikely that
* we select a process with CAP_SYS_RAW_IO set). * we select a process with CAP_SYS_RAW_IO set).
*/ */
static void __oom_kill_task(task_t *p) static void __oom_kill_task(task_t *p, const char *message)
{ {
if (p->pid == 1) { if (p->pid == 1) {
WARN_ON(1); WARN_ON(1);
...@@ -200,8 +230,8 @@ static void __oom_kill_task(task_t *p) ...@@ -200,8 +230,8 @@ static void __oom_kill_task(task_t *p)
return; return;
} }
task_unlock(p); task_unlock(p);
printk(KERN_ERR "Out of Memory: Killed process %d (%s).\n", printk(KERN_ERR "%s: Killed process %d (%s).\n",
p->pid, p->comm); message, p->pid, p->comm);
/* /*
* We give our sacrificial lamb high priority and access to * We give our sacrificial lamb high priority and access to
...@@ -214,7 +244,7 @@ static void __oom_kill_task(task_t *p) ...@@ -214,7 +244,7 @@ static void __oom_kill_task(task_t *p)
force_sig(SIGKILL, p); force_sig(SIGKILL, p);
} }
static struct mm_struct *oom_kill_task(task_t *p) static struct mm_struct *oom_kill_task(task_t *p, const char *message)
{ {
struct mm_struct *mm = get_task_mm(p); struct mm_struct *mm = get_task_mm(p);
task_t * g, * q; task_t * g, * q;
...@@ -226,21 +256,21 @@ static struct mm_struct *oom_kill_task(task_t *p) ...@@ -226,21 +256,21 @@ static struct mm_struct *oom_kill_task(task_t *p)
return NULL; return NULL;
} }
__oom_kill_task(p); __oom_kill_task(p, message);
/* /*
* kill all processes that share the ->mm (i.e. all threads), * kill all processes that share the ->mm (i.e. all threads),
* but are in a different thread group * but are in a different thread group
*/ */
do_each_thread(g, q) do_each_thread(g, q)
if (q->mm == mm && q->tgid != p->tgid) if (q->mm == mm && q->tgid != p->tgid)
__oom_kill_task(q); __oom_kill_task(q, message);
while_each_thread(g, q); while_each_thread(g, q);
return mm; return mm;
} }
static struct mm_struct *oom_kill_process(struct task_struct *p, static struct mm_struct *oom_kill_process(struct task_struct *p,
unsigned long points) unsigned long points, const char *message)
{ {
struct mm_struct *mm; struct mm_struct *mm;
struct task_struct *c; struct task_struct *c;
...@@ -253,11 +283,11 @@ static struct mm_struct *oom_kill_process(struct task_struct *p, ...@@ -253,11 +283,11 @@ static struct mm_struct *oom_kill_process(struct task_struct *p,
c = list_entry(tsk, struct task_struct, sibling); c = list_entry(tsk, struct task_struct, sibling);
if (c->mm == p->mm) if (c->mm == p->mm)
continue; continue;
mm = oom_kill_task(c); mm = oom_kill_task(c, message);
if (mm) if (mm)
return mm; return mm;
} }
return oom_kill_task(p); return oom_kill_task(p, message);
} }
/** /**
...@@ -268,10 +298,10 @@ static struct mm_struct *oom_kill_process(struct task_struct *p, ...@@ -268,10 +298,10 @@ static struct mm_struct *oom_kill_process(struct task_struct *p,
* OR try to be smart about which process to kill. Note that we * OR try to be smart about which process to kill. Note that we
* don't have to be perfect here, we just have to be good. * don't have to be perfect here, we just have to be good.
*/ */
void out_of_memory(gfp_t gfp_mask, int order) void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
{ {
struct mm_struct *mm = NULL; struct mm_struct *mm = NULL;
task_t * p; task_t *p;
unsigned long points; unsigned long points;
if (printk_ratelimit()) { if (printk_ratelimit()) {
...@@ -283,7 +313,28 @@ void out_of_memory(gfp_t gfp_mask, int order) ...@@ -283,7 +313,28 @@ void out_of_memory(gfp_t gfp_mask, int order)
cpuset_lock(); cpuset_lock();
read_lock(&tasklist_lock); read_lock(&tasklist_lock);
/*
* Check if there were limitations on the allocation (only relevant for
* NUMA) that may require different handling.
*/
switch (constrained_alloc(zonelist, gfp_mask)) {
case CONSTRAINT_MEMORY_POLICY:
mm = oom_kill_process(current, points,
"No available memory (MPOL_BIND)");
break;
case CONSTRAINT_CPUSET:
mm = oom_kill_process(current, points,
"No available memory in cpuset");
break;
case CONSTRAINT_NONE:
retry: retry:
/*
* Rambo mode: Shoot down a process and hope it solves whatever
* issues we may have.
*/
p = select_bad_process(&points); p = select_bad_process(&points);
if (PTR_ERR(p) == -1UL) if (PTR_ERR(p) == -1UL)
...@@ -296,12 +347,14 @@ void out_of_memory(gfp_t gfp_mask, int order) ...@@ -296,12 +347,14 @@ void out_of_memory(gfp_t gfp_mask, int order)
panic("Out of memory and no killable processes...\n"); panic("Out of memory and no killable processes...\n");
} }
mm = oom_kill_process(p, points); mm = oom_kill_process(p, points, "Out of memory");
if (!mm) if (!mm)
goto retry; goto retry;
out: break;
read_unlock(&tasklist_lock); }
out:
cpuset_unlock(); cpuset_unlock();
if (mm) if (mm)
mmput(mm); mmput(mm);
......
...@@ -1015,7 +1015,7 @@ __alloc_pages(gfp_t gfp_mask, unsigned int order, ...@@ -1015,7 +1015,7 @@ __alloc_pages(gfp_t gfp_mask, unsigned int order,
if (page) if (page)
goto got_pg; goto got_pg;
out_of_memory(gfp_mask, order); out_of_memory(zonelist, gfp_mask, order);
goto restart; goto restart;
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册