/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *
 *  Copyright (C)  2010  Google, Inc.
 *	Rewritten by David Rientjes
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers. It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>

int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;
static DEFINE_SPINLOCK(zone_scan_lock);

/**
 * compare_swap_oom_score_adj() - compare and swap current's oom_score_adj
 * @old_val: old oom_score_adj for compare
 * @new_val: new oom_score_adj for swap
 *
 * Sets the oom_score_adj value for current to @new_val iff its present value is
 * @old_val.  Usually used to reinstate a previous value to prevent racing with
 * userspace tuning the value in the interim.
 */
 */
void compare_swap_oom_score_adj(int old_val, int new_val)
{
	struct sighand_struct *sighand = current->sighand;

	spin_lock_irq(&sighand->siglock);
	if (current->signal->oom_score_adj == old_val)
		current->signal->oom_score_adj = new_val;
	spin_unlock_irq(&sighand->siglock);
}

/**
 * test_set_oom_score_adj() - set current's oom_score_adj and return old value
 * @new_val: new oom_score_adj value
 *
 * Sets the oom_score_adj value for current to @new_val with proper
 * synchronization and returns the old value.  Usually used to temporarily
 * set a value, save the old value in the caller, and then reinstate it later.
 */
int test_set_oom_score_adj(int new_val)
{
	struct sighand_struct *sighand = current->sighand;
	int old_val;

	spin_lock_irq(&sighand->siglock);
	old_val = current->signal->oom_score_adj;
	current->signal->oom_score_adj = new_val;
	spin_unlock_irq(&sighand->siglock);

	return old_val;
}
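
/*
 * Typical pairing of the two helpers above (an illustrative sketch, not a
 * real call site): temporarily adjust how attractive current is to the oom
 * killer, then restore the old value when done.
 *
 *	int old_val = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX);
 *	... do memory-intensive work ...
 *	compare_swap_oom_score_adj(OOM_SCORE_ADJ_MAX, old_val);
 *
 * The restore uses compare_swap_oom_score_adj() so that a value written to
 * /proc/pid/oom_score_adj by userspace in the interim is not overwritten.
 */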

#ifdef CONFIG_NUMA
/**
 * has_intersects_mems_allowed() - check task eligibility for kill
 * @tsk: task struct of which task to consider
 * @mask: nodemask passed to page allocator for mempolicy ooms
 *
 * Task eligibility is determined by whether or not a candidate task, @tsk,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 */
static bool has_intersects_mems_allowed(struct task_struct *tsk,
					const nodemask_t *mask)
{
	struct task_struct *start = tsk;

	do {
		if (mask) {
			/*
			 * If this is a mempolicy constrained oom, tsk's
			 * cpuset is irrelevant.  Only return true if its
			 * mempolicy intersects current, otherwise it may be
			 * needlessly killed.
			 */
			if (mempolicy_nodemask_intersects(tsk, mask))
				return true;
		} else {
			/*
			 * This is not a mempolicy constrained oom, so only
			 * check the mems of tsk's cpuset.
			 */
			if (cpuset_mems_allowed_intersects(current, tsk))
				return true;
		}
	} while_each_thread(start, tsk);

	return false;
}
#else
static bool has_intersects_mems_allowed(struct task_struct *tsk,
					const nodemask_t *mask)
{
	return true;
}
#endif /* CONFIG_NUMA */

/*
 * The process p may have detached its own ->mm while exiting or through
 * use_mm(), but one or more of its subthreads may still have a valid
 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t = p;

	do {
		task_lock(t);
		if (likely(t->mm))
			return t;
		task_unlock(t);
	} while_each_thread(p, t);

	return NULL;
}
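
/*
 * Callers must drop the task lock themselves once done with the mm, as
 * oom_badness() below does:
 *
 *	p = find_lock_task_mm(p);
 *	if (p) {
 *		... p->mm is stable here ...
 *		task_unlock(p);
 *	}
 */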

/* return true if the task is not an adequate candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p,
		const struct mem_cgroup *mem, const nodemask_t *nodemask)
{
	if (is_global_init(p))
		return true;
	if (p->flags & PF_KTHREAD)
		return true;

	/* For mem_cgroup_out_of_memory(), only tasks in the memcg are killable */
	if (mem && !task_in_mem_cgroup(p, mem))
		return true;

	/* p may not have freeable memory in nodemask */
	if (!has_intersects_mems_allowed(p, nodemask))
		return true;

	return false;
}

/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of the task whose badness we should calculate
 * @mem: current's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible.  The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
		      const nodemask_t *nodemask, unsigned long totalpages)
{
	int points;

	if (oom_unkillable_task(p, mem, nodemask))
		return 0;

	p = find_lock_task_mm(p);
	if (!p)
		return 0;

	if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
		task_unlock(p);
		return 0;
	}

	/*
	 * The memory controller may have a limit of 0 bytes, so avoid a divide
	 * by zero, if necessary.
	 */
	if (!totalpages)
		totalpages = 1;

	/*
	 * The baseline for the badness score is the proportion of RAM that each
	 * task's rss, pagetable and swap space use.
	 */
	points = get_mm_rss(p->mm) + p->mm->nr_ptes;
	points += get_mm_counter(p->mm, MM_SWAPENTS);

	points *= 1000;
	points /= totalpages;
	task_unlock(p);

	/*
	 * Root processes get 3% bonus, just like the __vm_enough_memory()
	 * implementation used by LSMs.
	 */
	if (has_capability_noaudit(p, CAP_SYS_ADMIN))
		points -= 30;

	/*
	 * /proc/pid/oom_score_adj ranges from -1000 to +1000 such that it may
	 * either completely disable oom killing or always prefer a certain
	 * task.
	 */
	points += p->signal->oom_score_adj;

	/*
	 * Never return 0 for an eligible task that may be killed since it's
	 * possible that no single user task uses more than 0.1% of memory and
	 * no single admin task uses more than 3.0%.
	 */
	if (points <= 0)
		return 1;
	return (points < 1000) ? points : 1000;
}
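
/*
 * Worked example with illustrative numbers: with totalpages = 1,000,000
 * pages of RAM plus swap, a task whose rss, page tables and swap entries
 * total 250,000 pages scores 250000 * 1000 / 1000000 = 250.  Run as root
 * that drops to 220, and an oom_score_adj of +300 would lift it to 520 of
 * a possible 1000.
 */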

/*
 * Determine the type of allocation constraint.
 */
#ifdef CONFIG_NUMA
static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
				gfp_t gfp_mask, nodemask_t *nodemask,
				unsigned long *totalpages)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	bool cpuset_limited = false;
	int nid;

	/* Default to all available memory */
	*totalpages = totalram_pages + total_swap_pages;

	if (!zonelist)
		return CONSTRAINT_NONE;
	/*
	 * We can only reach here with __GFP_THISNODE when __GFP_NOFAIL is also
	 * used, so avoid killing current; a random task will have to be killed
	 * in this case.  CONSTRAINT_THISNODE would describe it best, but there
	 * is no way to handle it yet.
	 */
	if (gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;

	/*
	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
	 * the page allocator means a mempolicy is in effect.  Cpuset policy
	 * is enforced in get_page_from_freelist().
	 */
	if (nodemask && !nodes_subset(node_states[N_HIGH_MEMORY], *nodemask)) {
		*totalpages = total_swap_pages;
		for_each_node_mask(nid, *nodemask)
			*totalpages += node_spanned_pages(nid);
		return CONSTRAINT_MEMORY_POLICY;
	}

	/* Check whether this allocation failure was caused by a cpuset constraint */
	for_each_zone_zonelist_nodemask(zone, z, zonelist,
			high_zoneidx, nodemask)
		if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
			cpuset_limited = true;

	if (cpuset_limited) {
		*totalpages = total_swap_pages;
		for_each_node_mask(nid, cpuset_current_mems_allowed)
			*totalpages += node_spanned_pages(nid);
		return CONSTRAINT_CPUSET;
	}
	return CONSTRAINT_NONE;
}
#else
static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
				gfp_t gfp_mask, nodemask_t *nodemask,
				unsigned long *totalpages)
{
	*totalpages = totalram_pages + total_swap_pages;
	return CONSTRAINT_NONE;
}
#endif

/*
 * Simple selection loop. We choose the process with the highest
 * number of 'points'. We expect the caller will lock the tasklist.
 *
 * (not docbooked, we don't want this one cluttering up the manual)
 */
static struct task_struct *select_bad_process(unsigned int *ppoints,
		unsigned long totalpages, struct mem_cgroup *mem,
		const nodemask_t *nodemask)
{
	struct task_struct *g, *p;
	struct task_struct *chosen = NULL;
	*ppoints = 0;

	do_each_thread(g, p) {
		unsigned int points;

		if (p->exit_state)
			continue;
		if (oom_unkillable_task(p, mem, nodemask))
			continue;

		/*
		 * This task already has access to memory reserves and is
		 * being killed. Don't allow any other task access to the
		 * memory reserve.
		 *
		 * Note: this may have a chance of deadlock if it gets
		 * blocked waiting for another task which itself is waiting
		 * for memory. Is there a better alternative?
		 */
		if (test_tsk_thread_flag(p, TIF_MEMDIE)) {
			if (unlikely(frozen(p)))
				thaw_process(p);
			return ERR_PTR(-1UL);
		}
		if (!p->mm)
			continue;

		if (p->flags & PF_EXITING) {
			/*
			 * If p is the current task and is in the process of
			 * releasing memory, we allow the "kill" to set
			 * TIF_MEMDIE, which will allow it to gain access to
			 * memory reserves.  Otherwise, it may stall forever.
			 *
			 * The loop isn't broken here, however, in case other
			 * threads are found to have already been oom killed.
			 */
			if (p == current) {
				chosen = p;
				*ppoints = 1000;
			} else {
				/*
				 * If this task is not being ptraced on exit,
				 * then wait for it to finish before killing
				 * some other task unnecessarily.
				 */
				if (!(p->group_leader->ptrace & PT_TRACE_EXIT))
					return ERR_PTR(-1UL);
			}
		}

		points = oom_badness(p, mem, nodemask, totalpages);
		if (points > *ppoints) {
			chosen = p;
			*ppoints = points;
		}
	} while_each_thread(g, p);

	return chosen;
}
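
/*
 * Both callers in this file hold tasklist_lock across the scan, e.g. as
 * out_of_memory() below does:
 *
 *	read_lock(&tasklist_lock);
 *	p = select_bad_process(&points, totalpages, NULL, mpol_mask);
 *	...
 *	read_unlock(&tasklist_lock);
 */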

/**
 * dump_tasks - dump current memory state of all system tasks
 * @mem: current's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 *
 * Dumps the current memory state of all eligible tasks.  Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss, cpu, oom_adj
 * value, oom_score_adj value, and name.
 *
 * Call with tasklist_lock read-locked.
 */
static void dump_tasks(const struct mem_cgroup *mem, const nodemask_t *nodemask)
{
	struct task_struct *p;
	struct task_struct *task;

	pr_info("[ pid ]   uid  tgid total_vm      rss cpu oom_adj oom_score_adj name\n");
	for_each_process(p) {
		if (oom_unkillable_task(p, mem, nodemask))
			continue;

		task = find_lock_task_mm(p);
		if (!task) {
			/*
			 * This is a kthread or all of p's threads have already
			 * detached their mm's.  There's no need to report
			 * them; they can't be oom killed anyway.
			 */
			continue;
		}

		pr_info("[%5d] %5d %5d %8lu %8lu %3u     %3d         %5d %s\n",
			task->pid, task_uid(task), task->tgid,
			task->mm->total_vm, get_mm_rss(task->mm),
			task_cpu(task), task->signal->oom_adj,
			task->signal->oom_score_adj, task->comm);
		task_unlock(task);
	}
}
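
/*
 * A sample (made up) line of dump_tasks() output, under the header printed
 * above:
 *
 *	[ pid ]   uid  tgid total_vm      rss cpu oom_adj oom_score_adj name
 *	[ 1234]  1000  1234    45678     9876   1       0             0 firefox
 */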

static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
			struct mem_cgroup *mem, const nodemask_t *nodemask)
{
	task_lock(current);
	pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
		"oom_adj=%d, oom_score_adj=%d\n",
		current->comm, gfp_mask, order, current->signal->oom_adj,
		current->signal->oom_score_adj);
	cpuset_print_task_mems_allowed(current);
	task_unlock(current);
	dump_stack();
	mem_cgroup_print_oom_info(mem, p);
	show_mem(SHOW_MEM_FILTER_NODES);
	if (sysctl_oom_dump_tasks)
		dump_tasks(mem, nodemask);
}

#define K(x) ((x) << (PAGE_SHIFT-10))
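/* K() scales a page count to kilobytes, e.g. K(256) == 1024 with 4K pages */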
static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem)
{
	struct task_struct *q;
	struct mm_struct *mm;

	p = find_lock_task_mm(p);
	if (!p)
		return 1;

	/* mm cannot be safely dereferenced after task_unlock(p) */
	mm = p->mm;

	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
		task_pid_nr(p), p->comm, K(p->mm->total_vm),
		K(get_mm_counter(p->mm, MM_ANONPAGES)),
		K(get_mm_counter(p->mm, MM_FILEPAGES)));
	task_unlock(p);

	/*
	 * Kill all user processes sharing p->mm in other thread groups, if any.
	 * They don't get access to memory reserves or a higher scheduler
	 * priority, though, to avoid depletion of all memory or task
	 * starvation.  This prevents mm->mmap_sem livelock when an oom killed
	 * task cannot exit because it requires the semaphore and it's contended
	 * by another thread trying to allocate memory itself.  That thread will
	 * now get access to memory reserves since it has a pending fatal
	 * signal.
	 */
	for_each_process(q)
		if (q->mm == mm && !same_thread_group(q, p) &&
		    !(q->flags & PF_KTHREAD)) {
			if (q->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
				continue;

			task_lock(q);	/* Protect ->comm from prctl() */
			pr_err("Kill process %d (%s) sharing same memory\n",
				task_pid_nr(q), q->comm);
			task_unlock(q);
			force_sig(SIGKILL, q);
		}

	set_tsk_thread_flag(p, TIF_MEMDIE);
	force_sig(SIGKILL, p);

	return 0;
}
#undef K

static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
			    unsigned int points, unsigned long totalpages,
			    struct mem_cgroup *mem, nodemask_t *nodemask,
			    const char *message)
{
	struct task_struct *victim = p;
	struct task_struct *child;
	struct task_struct *t = p;
	unsigned int victim_points = 0;

	if (printk_ratelimit())
		dump_header(p, gfp_mask, order, mem, nodemask);

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just set TIF_MEMDIE so it can die quickly
	 */
	if (p->flags & PF_EXITING) {
		set_tsk_thread_flag(p, TIF_MEMDIE);
		return 0;
	}

	task_lock(p);
	pr_err("%s: Kill process %d (%s) score %d or sacrifice child\n",
		message, task_pid_nr(p), p->comm, points);
	task_unlock(p);

	/*
	 * If any of p's children has a different mm and is eligible for kill,
	 * the one with the highest oom_badness() score is sacrificed for its
	 * parent.  This attempts to lose the minimal amount of work done while
	 * still freeing memory.
	 */
	do {
		list_for_each_entry(child, &t->children, sibling) {
			unsigned int child_points;

			if (child->mm == p->mm)
				continue;
			/*
			 * oom_badness() returns 0 if the thread is unkillable
			 */
			child_points = oom_badness(child, mem, nodemask,
								totalpages);
			if (child_points > victim_points) {
				victim = child;
				victim_points = child_points;
			}
		}
	} while_each_thread(p, t);

	return oom_kill_task(victim, mem);
}

/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
				int order, const nodemask_t *nodemask)
{
	if (likely(!sysctl_panic_on_oom))
		return;
	if (sysctl_panic_on_oom != 2) {
		/*
		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
		 * does not panic for cpuset, mempolicy, or memcg allocation
		 * failures.
		 */
		if (constraint != CONSTRAINT_NONE)
			return;
	}
	read_lock(&tasklist_lock);
	dump_header(NULL, gfp_mask, order, NULL, nodemask);
	read_unlock(&tasklist_lock);
	panic("Out of memory: %s panic_on_oom is enabled\n",
		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}
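
/*
 * In sysctl terms (an illustrative note): "vm.panic_on_oom = 1" panics only
 * for unconstrained (CONSTRAINT_NONE) ooms, while "vm.panic_on_oom = 2"
 * panics unconditionally, even for cpuset, mempolicy, or memcg constrained
 * ooms.
 */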

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
{
	unsigned long limit;
	unsigned int points = 0;
	struct task_struct *p;

	/*
	 * If current has a pending SIGKILL, then automatically select it.  The
	 * goal is to allow it to allocate so that it may quickly exit and free
	 * its memory.
	 */
	if (fatal_signal_pending(current)) {
		set_thread_flag(TIF_MEMDIE);
		return;
	}

	check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0, NULL);
	limit = mem_cgroup_get_limit(mem) >> PAGE_SHIFT;
	read_lock(&tasklist_lock);
retry:
	p = select_bad_process(&points, limit, mem, NULL);
	if (!p || PTR_ERR(p) == -1UL)
		goto out;

	if (oom_kill_process(p, gfp_mask, 0, points, limit, mem, NULL,
				"Memory cgroup out of memory"))
		goto retry;
out:
	read_unlock(&tasklist_lock);
}
#endif

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);
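
/*
 * An illustrative (hypothetical) notifier that donates pages from a private
 * cache just before the oom killer runs; shrink_my_private_cache() is a
 * made-up helper, and *parm accumulates the pages freed by the whole chain:
 *
 *	static int my_oom_notify(struct notifier_block *nb,
 *				 unsigned long dummy, void *parm)
 *	{
 *		unsigned long *freed = parm;
 *
 *		*freed += shrink_my_private_cache();
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_oom_nb = {
 *		.notifier_call = my_oom_notify,
 *	};
 *
 *	register_oom_notifier(&my_oom_nb);
 *
 * If the chain reports any freed pages, out_of_memory() below returns
 * without killing anything.
 */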

/*
 * Try to acquire the OOM killer lock for the zones in zonelist.  Returns zero
 * if a parallel OOM killing is already taking place that includes a zone in
 * the zonelist.  Otherwise, locks all zones in the zonelist and returns 1.
 */
int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
	struct zoneref *z;
	struct zone *zone;
	int ret = 1;

	spin_lock(&zone_scan_lock);
	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		if (zone_is_oom_locked(zone)) {
			ret = 0;
			goto out;
		}
	}

	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		/*
		 * Lock each zone in the zonelist under zone_scan_lock so a
		 * parallel invocation of try_set_zonelist_oom() doesn't succeed
		 * when it shouldn't.
		 */
		zone_set_flag(zone, ZONE_OOM_LOCKED);
	}

out:
	spin_unlock(&zone_scan_lock);
	return ret;
}

/*
 * Clears the ZONE_OOM_LOCKED flag for all zones in the zonelist so that failed
 * allocation attempts with zonelists containing them may now recall the OOM
 * killer, if necessary.
 */
void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
	struct zoneref *z;
	struct zone *zone;

	spin_lock(&zone_scan_lock);
	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		zone_clear_flag(zone, ZONE_OOM_LOCKED);
	}
	spin_unlock(&zone_scan_lock);
}
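
/*
 * The page allocator pairs the two functions above around the kill, roughly
 * as follows (a sketch of the pattern in mm/page_alloc.c; details there may
 * differ):
 *
 *	if (!try_set_zonelist_oom(zonelist, gfp_mask))
 *		return NULL;	(another oom kill is already in progress)
 *	out_of_memory(zonelist, gfp_mask, order, nodemask);
 *	clear_zonelist_oom(zonelist, gfp_mask);
 */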

/*
 * Try to acquire the oom killer lock for all system zones.  Returns zero if a
 * parallel oom killing is taking place, otherwise locks all zones and returns
 * non-zero.
 */
static int try_set_system_oom(void)
{
	struct zone *zone;
	int ret = 1;

	spin_lock(&zone_scan_lock);
	for_each_populated_zone(zone)
		if (zone_is_oom_locked(zone)) {
			ret = 0;
			goto out;
		}
	for_each_populated_zone(zone)
		zone_set_flag(zone, ZONE_OOM_LOCKED);
out:
	spin_unlock(&zone_scan_lock);
	return ret;
}

/*
 * Clears ZONE_OOM_LOCKED for all system zones so that failed allocation
 * attempts or page faults may now recall the oom killer, if necessary.
 */
static void clear_system_oom(void)
{
	struct zone *zone;

	spin_lock(&zone_scan_lock);
	for_each_populated_zone(zone)
		zone_clear_flag(zone, ZONE_OOM_LOCKED);
	spin_unlock(&zone_scan_lock);
}

/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @zonelist: zonelist pointer
 * @gfp_mask: memory allocation flags
 * @order: amount of memory being requested as a power of 2
 * @nodemask: nodemask passed to page allocator
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse),
 * or trying to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
		int order, nodemask_t *nodemask)
{
	const nodemask_t *mpol_mask;
	struct task_struct *p;
	unsigned long totalpages;
	unsigned long freed = 0;
	unsigned int points;
	enum oom_constraint constraint = CONSTRAINT_NONE;
	int killed = 0;

	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
	if (freed > 0)
		/* Got some memory back in the last second. */
		return;

	/*
	 * If current has a pending SIGKILL, then automatically select it.  The
	 * goal is to allow it to allocate so that it may quickly exit and free
	 * its memory.
	 */
	if (fatal_signal_pending(current)) {
		set_thread_flag(TIF_MEMDIE);
		return;
	}

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA) that may require different handling.
	 */
	constraint = constrained_alloc(zonelist, gfp_mask, nodemask,
						&totalpages);
	mpol_mask = (constraint == CONSTRAINT_MEMORY_POLICY) ? nodemask : NULL;
	check_panic_on_oom(constraint, gfp_mask, order, mpol_mask);

	read_lock(&tasklist_lock);
	if (sysctl_oom_kill_allocating_task &&
	    !oom_unkillable_task(current, NULL, nodemask) &&
	    current->mm) {
		/*
		 * oom_kill_process() needs tasklist_lock held.  If it returns
		 * non-zero, current could not be killed so we must fall back to
		 * the tasklist scan.
		 */
		if (!oom_kill_process(current, gfp_mask, order, 0, totalpages,
				NULL, nodemask,
				"Out of memory (oom_kill_allocating_task)"))
			goto out;
	}

retry:
	p = select_bad_process(&points, totalpages, NULL, mpol_mask);
	if (PTR_ERR(p) == -1UL)
		goto out;

	/* Found nothing?!?! Either we hang forever, or we panic. */
	if (!p) {
		dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
		read_unlock(&tasklist_lock);
		panic("Out of memory and no killable processes...\n");
	}

	if (oom_kill_process(p, gfp_mask, order, points, totalpages, NULL,
				nodemask, "Out of memory"))
		goto retry;
	killed = 1;
out:
	read_unlock(&tasklist_lock);

	/*
	 * Give "p" a good chance of killing itself before we
	 * retry the allocation, unless "p" is current.
	 */
	if (killed && !test_thread_flag(TIF_MEMDIE))
		schedule_timeout_uninterruptible(1);
}

/*
 * The pagefault handler calls here because it is out of memory, so kill a
 * memory-hogging task.  If a populated zone has ZONE_OOM_LOCKED set, a parallel
 * oom killing is already in progress so do nothing.  If a task is found with
 * TIF_MEMDIE set, it has been killed so do nothing and allow it to exit.
 */
void pagefault_out_of_memory(void)
{
	if (try_set_system_oom()) {
		out_of_memory(NULL, 0, 0, NULL);
		clear_system_oom();
	}
	if (!test_thread_flag(TIF_MEMDIE))
		schedule_timeout_uninterruptible(1);
}