/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *  Copyright (C)  2010  Google, Inc.
 *	Rewritten by David Rientjes
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers. It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>

int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;
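
/*
 * The sysctls above are exposed via /proc/sys/vm/ as panic_on_oom,
 * oom_kill_allocating_task and oom_dump_tasks; their table entries live in
 * kernel/sysctl.c.
 */
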
static DEFINE_SPINLOCK(zone_scan_lock);

#ifdef CONFIG_NUMA
/**
 * has_intersects_mems_allowed() - check task eligibility for kill
 * @tsk: task struct of which task to consider
 * @mask: nodemask passed to page allocator for mempolicy ooms
 *
 * Task eligibility is determined by whether or not a candidate task, @tsk,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 */
static bool has_intersects_mems_allowed(struct task_struct *tsk,
					const nodemask_t *mask)
{
	struct task_struct *start = tsk;

	do {
		if (mask) {
			/*
			 * If this is a mempolicy constrained oom, tsk's
			 * cpuset is irrelevant.  Only return true if its
			 * mempolicy intersects current, otherwise it may be
			 * needlessly killed.
			 */
			if (mempolicy_nodemask_intersects(tsk, mask))
				return true;
		} else {
			/*
			 * This is not a mempolicy constrained oom, so only
			 * check the mems of tsk's cpuset.
			 */
			if (cpuset_mems_allowed_intersects(current, tsk))
				return true;
		}
	} while_each_thread(start, tsk);

	return false;
}
#else
static bool has_intersects_mems_allowed(struct task_struct *tsk,
					const nodemask_t *mask)
{
	return true;
}
#endif /* CONFIG_NUMA */

/*
 * If this is a system OOM (not a memcg OOM) and the task selected to be
 * killed is not already running at high (RT) priorities, speed up the
 * recovery by boosting the dying task to the lowest FIFO priority.
 * That helps with the recovery and avoids interfering with RT tasks.
 */
static void boost_dying_task_prio(struct task_struct *p,
				  struct mem_cgroup *mem)
{
	struct sched_param param = { .sched_priority = 1 };

	if (mem)
		return;

	if (!rt_task(p))
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
}

/*
 * The process p may have detached its own ->mm while exiting or through
 * use_mm(), but one or more of its subthreads may still have a valid
 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t = p;

	do {
		task_lock(t);
		if (likely(t->mm))
			return t;
		task_unlock(t);
	} while_each_thread(p, t);

	return NULL;
}

/* return true if the task is not suitable as a candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p,
		const struct mem_cgroup *mem, const nodemask_t *nodemask)
{
	if (is_global_init(p))
		return true;
	if (p->flags & PF_KTHREAD)
		return true;

	/* Skip p for mem_cgroup_out_of_memory() if it is not in the group */
	if (mem && !task_in_mem_cgroup(p, mem))
		return true;

	/* p may not have freeable memory in nodemask */
	if (!has_intersects_mems_allowed(p, nodemask))
		return true;

	return false;
}

/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of the task whose badness score we should calculate
 * @mem: current's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible.  The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
		      const nodemask_t *nodemask, unsigned long totalpages)
{
	int points;

	if (oom_unkillable_task(p, mem, nodemask))
		return 0;

	p = find_lock_task_mm(p);
	if (!p)
		return 0;

	/*
	 * Shortcut check for a thread sharing p->mm that is OOM_SCORE_ADJ_MIN
	 * so the entire heuristic doesn't need to be executed for something
	 * that cannot be killed.
	 */
	if (atomic_read(&p->mm->oom_disable_count)) {
		task_unlock(p);
		return 0;
	}

	/*
	 * When the PF_OOM_ORIGIN bit is set, it indicates the task should have
	 * priority for oom killing.
	 */
	if (p->flags & PF_OOM_ORIGIN) {
		task_unlock(p);
		return 1000;
	}

	/*
	 * The memory controller may have a limit of 0 bytes, so avoid a divide
	 * by zero, if necessary.
	 */
	if (!totalpages)
		totalpages = 1;

	/*
	 * The baseline for the badness score is the proportion of RAM that each
	 * task's rss and swap space use.
	 */
	points = (get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS)) * 1000 /
			totalpages;
	task_unlock(p);

	/*
	 * Root processes get 3% bonus, just like the __vm_enough_memory()
	 * implementation used by LSMs.
	 */
	if (has_capability_noaudit(p, CAP_SYS_ADMIN))
		points -= 30;

	/*
	 * /proc/pid/oom_score_adj ranges from -1000 to +1000 such that it may
	 * either completely disable oom killing or always prefer a certain
	 * task.
	 */
	points += p->signal->oom_score_adj;

	/*
	 * Never return 0 for an eligible task that may be killed since it's
	 * possible that no single user task uses more than 0.1% of memory and
	 * no single admin task uses more than 3.0%.
	 */
	if (points <= 0)
		return 1;
	return (points < 1000) ? points : 1000;
}
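
/*
 * Worked example for oom_badness() above (illustrative numbers only): with
 * totalpages = 1,000,000 and a root-owned task whose rss + swap is 100,000
 * pages, the baseline is 100000 * 1000 / 1000000 = 100; the root bonus
 * subtracts 30 and, with oom_score_adj == 0, the final badness is 70 on the
 * 0..1000 scale.
 */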

/*
 * Determine the type of allocation constraint.
 */
#ifdef CONFIG_NUMA
static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
				gfp_t gfp_mask, nodemask_t *nodemask,
				unsigned long *totalpages)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	bool cpuset_limited = false;
	int nid;

	/* Default to all available memory */
	*totalpages = totalram_pages + total_swap_pages;

	if (!zonelist)
		return CONSTRAINT_NONE;
	/*
	 * This is reached only when __GFP_NOFAIL is used, so we should avoid
	 * killing current; we have to kill a random task in this case.
	 * Hopefully CONSTRAINT_THISNODE would apply, but there is no way to
	 * handle it for now.
	 */
	if (gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;

	/*
	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
	 * the page allocator means a mempolicy is in effect.  Cpuset policy
	 * is enforced in get_page_from_freelist().
	 */
	if (nodemask && !nodes_subset(node_states[N_HIGH_MEMORY], *nodemask)) {
		*totalpages = total_swap_pages;
		for_each_node_mask(nid, *nodemask)
			*totalpages += node_spanned_pages(nid);
		return CONSTRAINT_MEMORY_POLICY;
	}

	/* Check this allocation failure is caused by cpuset's wall function */
	for_each_zone_zonelist_nodemask(zone, z, zonelist,
			high_zoneidx, nodemask)
		if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
			cpuset_limited = true;

	if (cpuset_limited) {
		*totalpages = total_swap_pages;
		for_each_node_mask(nid, cpuset_current_mems_allowed)
			*totalpages += node_spanned_pages(nid);
		return CONSTRAINT_CPUSET;
	}
	return CONSTRAINT_NONE;
}
#else
static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
				gfp_t gfp_mask, nodemask_t *nodemask,
				unsigned long *totalpages)
{
	*totalpages = totalram_pages + total_swap_pages;
	return CONSTRAINT_NONE;
}
#endif
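
/*
 * Quick reference for the constraint types above: CONSTRAINT_NONE means all
 * of RAM plus swap is usable, CONSTRAINT_MEMORY_POLICY restricts the scan to
 * a mempolicy nodemask, CONSTRAINT_CPUSET restricts it to the current
 * cpuset's mems, and CONSTRAINT_MEMCG (see mem_cgroup_out_of_memory() below)
 * means a memory controller limit was hit.
 */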

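/*
 * Note for callers: select_bad_process() returns the chosen victim, NULL if
 * no eligible task was found, or ERR_PTR(-1UL) if the caller should abort
 * and retry the allocation because a victim is already on its way out.
 */
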
/*
 * Simple selection loop. We choose the process with the highest
 * number of 'points'. We expect the caller will lock the tasklist.
 *
 * (not docbooked, we don't want this one cluttering up the manual)
 */
static struct task_struct *select_bad_process(unsigned int *ppoints,
		unsigned long totalpages, struct mem_cgroup *mem,
		const nodemask_t *nodemask)
{
	struct task_struct *g, *p;
	struct task_struct *chosen = NULL;
	*ppoints = 0;

	do_each_thread(g, p) {
		unsigned int points;

		if (!p->mm)
			continue;
		if (oom_unkillable_task(p, mem, nodemask))
			continue;

		/*
		 * This task already has access to memory reserves and is
		 * being killed. Don't allow any other task access to the
		 * memory reserve.
		 *
		 * Note: this may have a chance of deadlock if it gets
		 * blocked waiting for another task which itself is waiting
		 * for memory. Is there a better alternative?
		 */
		if (test_tsk_thread_flag(p, TIF_MEMDIE))
			return ERR_PTR(-1UL);

		if (p->flags & PF_EXITING) {
			/*
			 * If p is the current task and is in the process of
			 * releasing memory, we allow the "kill" to set
			 * TIF_MEMDIE, which will allow it to gain access to
			 * memory reserves.  Otherwise, it may stall forever.
			 *
			 * The loop isn't broken here, however, in case other
			 * threads are found to have already been oom killed.
			 */
			if (p == current) {
				chosen = p;
				*ppoints = 1000;
			} else {
				/*
				 * If this task is not being ptraced on exit,
				 * then wait for it to finish before killing
				 * some other task unnecessarily.
				 */
				if (!(task_ptrace(p->group_leader) &
							PT_TRACE_EXIT))
					return ERR_PTR(-1UL);
			}
		}

		points = oom_badness(p, mem, nodemask, totalpages);
		if (points > *ppoints) {
			chosen = p;
			*ppoints = points;
		}
	} while_each_thread(g, p);

	return chosen;
}

/**
 * dump_tasks - dump current memory state of all system tasks
 * @mem: current's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 *
 * Dumps the current memory state of all eligible tasks.  Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss, cpu, oom_adj
 * value, oom_score_adj value, and name.
 *
 * Call with tasklist_lock read-locked.
 */
static void dump_tasks(const struct mem_cgroup *mem, const nodemask_t *nodemask)
{
	struct task_struct *p;
	struct task_struct *task;

	pr_info("[ pid ]   uid  tgid total_vm      rss cpu oom_adj oom_score_adj name\n");
	for_each_process(p) {
		if (oom_unkillable_task(p, mem, nodemask))
			continue;

		task = find_lock_task_mm(p);
		if (!task) {
			/*
			 * This is a kthread or all of p's threads have already
			 * detached their mm's.  There's no need to report
			 * them; they can't be oom killed anyway.
			 */
			continue;
		}

		pr_info("[%5d] %5d %5d %8lu %8lu %3u     %3d         %5d %s\n",
			task->pid, task_uid(task), task->tgid,
			task->mm->total_vm, get_mm_rss(task->mm),
			task_cpu(task), task->signal->oom_adj,
			task->signal->oom_score_adj, task->comm);
		task_unlock(task);
	}
}

static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
			struct mem_cgroup *mem, const nodemask_t *nodemask)
{
	task_lock(current);
	pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
		"oom_adj=%d, oom_score_adj=%d\n",
		current->comm, gfp_mask, order, current->signal->oom_adj,
		current->signal->oom_score_adj);
	cpuset_print_task_mems_allowed(current);
	task_unlock(current);
	dump_stack();
	mem_cgroup_print_oom_info(mem, p);
	show_mem(SHOW_MEM_FILTER_NODES);
	if (sysctl_oom_dump_tasks)
		dump_tasks(mem, nodemask);
}
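
/* K(x) below converts a count of pages into kilobytes */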

#define K(x) ((x) << (PAGE_SHIFT-10))
static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem)
{
	struct task_struct *q;
	struct mm_struct *mm;

	p = find_lock_task_mm(p);
	if (!p)
		return 1;

	/* mm cannot be safely dereferenced after task_unlock(p) */
	mm = p->mm;

	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
		task_pid_nr(p), p->comm, K(p->mm->total_vm),
		K(get_mm_counter(p->mm, MM_ANONPAGES)),
		K(get_mm_counter(p->mm, MM_FILEPAGES)));
	task_unlock(p);

	/*
	 * Kill all processes sharing p->mm in other thread groups, if any.
	 * They don't get access to memory reserves or a higher scheduler
	 * priority, though, to avoid depletion of all memory or task
	 * starvation.  This prevents mm->mmap_sem livelock when an oom killed
	 * task cannot exit because it requires the semaphore and it's contended
	 * by another thread trying to allocate memory itself.  That thread will
	 * now get access to memory reserves since it has a pending fatal
	 * signal.
	 */
	for_each_process(q)
		if (q->mm == mm && !same_thread_group(q, p)) {
			task_lock(q);	/* Protect ->comm from prctl() */
			pr_err("Kill process %d (%s) sharing same memory\n",
				task_pid_nr(q), q->comm);
			task_unlock(q);
			force_sig(SIGKILL, q);
		}

	set_tsk_thread_flag(p, TIF_MEMDIE);
	force_sig(SIGKILL, p);

	/*
	 * We give our sacrificial lamb high priority and access to
	 * all the memory it needs. That way it should be able to
	 * exit() and clear out its resources quickly...
	 */
	boost_dying_task_prio(p, mem);

	return 0;
}
#undef K

static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
			    unsigned int points, unsigned long totalpages,
			    struct mem_cgroup *mem, nodemask_t *nodemask,
			    const char *message)
{
	struct task_struct *victim = p;
	struct task_struct *child;
	struct task_struct *t = p;
	unsigned int victim_points = 0;

	if (printk_ratelimit())
		dump_header(p, gfp_mask, order, mem, nodemask);

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just set TIF_MEMDIE so it can die quickly
	 */
	if (p->flags & PF_EXITING) {
		set_tsk_thread_flag(p, TIF_MEMDIE);
		boost_dying_task_prio(p, mem);
		return 0;
	}

	task_lock(p);
	pr_err("%s: Kill process %d (%s) score %d or sacrifice child\n",
		message, task_pid_nr(p), p->comm, points);
	task_unlock(p);

	/*
	 * If any of p's children has a different mm and is eligible for kill,
	 * the one with the highest badness() score is sacrificed for its
	 * parent.  This attempts to lose the minimal amount of work done while
	 * still freeing memory.
	 */
	do {
		list_for_each_entry(child, &t->children, sibling) {
			unsigned int child_points;

			if (child->mm == p->mm)
				continue;
			/*
			 * oom_badness() returns 0 if the thread is unkillable
			 */
			child_points = oom_badness(child, mem, nodemask,
								totalpages);
			if (child_points > victim_points) {
				victim = child;
				victim_points = child_points;
			}
		}
	} while_each_thread(p, t);

	return oom_kill_task(victim, mem);
}

/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
				int order, const nodemask_t *nodemask)
{
	if (likely(!sysctl_panic_on_oom))
		return;
	if (sysctl_panic_on_oom != 2) {
		/*
		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
		 * does not panic for cpuset, mempolicy, or memcg allocation
		 * failures.
		 */
		if (constraint != CONSTRAINT_NONE)
			return;
	}
	read_lock(&tasklist_lock);
	dump_header(NULL, gfp_mask, order, NULL, nodemask);
	read_unlock(&tasklist_lock);
	panic("Out of memory: %s panic_on_oom is enabled\n",
		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
{
	unsigned long limit;
	unsigned int points = 0;
	struct task_struct *p;

	/*
	 * If current has a pending SIGKILL, then automatically select it.  The
	 * goal is to allow it to allocate so that it may quickly exit and free
	 * its memory.
	 */
	if (fatal_signal_pending(current)) {
		set_thread_flag(TIF_MEMDIE);
		boost_dying_task_prio(current, NULL);
		return;
	}

	check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0, NULL);
	limit = mem_cgroup_get_limit(mem) >> PAGE_SHIFT;
	read_lock(&tasklist_lock);
retry:
	p = select_bad_process(&points, limit, mem, NULL);
	if (!p || PTR_ERR(p) == -1UL)
		goto out;

	if (oom_kill_process(p, gfp_mask, 0, points, limit, mem, NULL,
				"Memory cgroup out of memory"))
		goto retry;
out:
	read_unlock(&tasklist_lock);
}
#endif
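
/*
 * mem_cgroup_out_of_memory() above is reached from the memcg charge path in
 * mm/memcontrol.c when a cgroup exceeds its limit, as opposed to
 * out_of_memory() below, which handles system-wide OOM conditions.
 */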

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);
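
/*
 * Usage sketch for the notifier chain above (illustrative only; the names
 * my_oom_notify, my_oom_nb and my_cache_shrink are hypothetical): a driver
 * that can release a private cache under memory pressure may hook the chain
 * and report how many pages it freed via the unsigned long pointer passed
 * as the notifier data:
 *
 *	static int my_oom_notify(struct notifier_block *nb,
 *				 unsigned long unused, void *parm)
 *	{
 *		unsigned long *freed = parm;
 *
 *		*freed += my_cache_shrink();
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_oom_nb = {
 *		.notifier_call = my_oom_notify,
 *	};
 *
 * After register_oom_notifier(&my_oom_nb), out_of_memory() calls the chain
 * before selecting a victim and skips the kill entirely when the callbacks
 * report that any pages were freed.
 */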

/*
 * Try to acquire the OOM killer lock for the zones in zonelist.  Returns zero
 * if a parallel OOM killing is already taking place that includes a zone in
 * the zonelist.  Otherwise, locks all zones in the zonelist and returns 1.
 */
int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
	struct zoneref *z;
	struct zone *zone;
	int ret = 1;

	spin_lock(&zone_scan_lock);
	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		if (zone_is_oom_locked(zone)) {
			ret = 0;
			goto out;
		}
	}

	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		/*
		 * Lock each zone in the zonelist under zone_scan_lock so a
		 * parallel invocation of try_set_zonelist_oom() doesn't succeed
		 * when it shouldn't.
		 */
		zone_set_flag(zone, ZONE_OOM_LOCKED);
	}

out:
	spin_unlock(&zone_scan_lock);
	return ret;
}

/*
 * Clears the ZONE_OOM_LOCKED flag for all zones in the zonelist so that failed
 * allocation attempts with zonelists containing them may now recall the OOM
 * killer, if necessary.
 */
void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
	struct zoneref *z;
	struct zone *zone;

	spin_lock(&zone_scan_lock);
	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		zone_clear_flag(zone, ZONE_OOM_LOCKED);
	}
	spin_unlock(&zone_scan_lock);
}

/*
 * Try to acquire the oom killer lock for all system zones.  Returns zero if a
 * parallel oom killing is taking place, otherwise locks all zones and returns
 * non-zero.
 */
static int try_set_system_oom(void)
{
	struct zone *zone;
	int ret = 1;

	spin_lock(&zone_scan_lock);
	for_each_populated_zone(zone)
		if (zone_is_oom_locked(zone)) {
			ret = 0;
			goto out;
		}
	for_each_populated_zone(zone)
		zone_set_flag(zone, ZONE_OOM_LOCKED);
out:
	spin_unlock(&zone_scan_lock);
	return ret;
}

/*
 * Clears ZONE_OOM_LOCKED for all system zones so that failed allocation
 * attempts or page faults may now recall the oom killer, if necessary.
 */
static void clear_system_oom(void)
{
	struct zone *zone;

	spin_lock(&zone_scan_lock);
	for_each_populated_zone(zone)
		zone_clear_flag(zone, ZONE_OOM_LOCKED);
	spin_unlock(&zone_scan_lock);
}

/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @zonelist: zonelist pointer
 * @gfp_mask: memory allocation flags
 * @order: amount of memory being requested as a power of 2
 * @nodemask: nodemask passed to page allocator
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR try to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
		int order, nodemask_t *nodemask)
{
	const nodemask_t *mpol_mask;
	struct task_struct *p;
	unsigned long totalpages;
	unsigned long freed = 0;
	unsigned int points;
	enum oom_constraint constraint = CONSTRAINT_NONE;
	int killed = 0;

	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
	if (freed > 0)
		/* Got some memory back in the last second. */
		return;

	/*
	 * If current has a pending SIGKILL, then automatically select it.  The
	 * goal is to allow it to allocate so that it may quickly exit and free
	 * its memory.
	 */
	if (fatal_signal_pending(current)) {
		set_thread_flag(TIF_MEMDIE);
		boost_dying_task_prio(current, NULL);
		return;
	}

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA) that may require different handling.
	 */
	constraint = constrained_alloc(zonelist, gfp_mask, nodemask,
						&totalpages);
	mpol_mask = (constraint == CONSTRAINT_MEMORY_POLICY) ? nodemask : NULL;
	check_panic_on_oom(constraint, gfp_mask, order, mpol_mask);

	read_lock(&tasklist_lock);
	if (sysctl_oom_kill_allocating_task &&
	    !oom_unkillable_task(current, NULL, nodemask) &&
	    current->mm && !atomic_read(&current->mm->oom_disable_count)) {
		/*
		 * oom_kill_process() needs tasklist_lock held.  If it returns
		 * non-zero, current could not be killed so we must fallback to
		 * the tasklist scan.
		 */
		if (!oom_kill_process(current, gfp_mask, order, 0, totalpages,
				NULL, nodemask,
				"Out of memory (oom_kill_allocating_task)"))
			goto out;
	}

retry:
	p = select_bad_process(&points, totalpages, NULL, mpol_mask);
	if (PTR_ERR(p) == -1UL)
		goto out;

	/* Found nothing?!?! Either we hang forever, or we panic. */
	if (!p) {
		dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
		read_unlock(&tasklist_lock);
		panic("Out of memory and no killable processes...\n");
	}

	if (oom_kill_process(p, gfp_mask, order, points, totalpages, NULL,
				nodemask, "Out of memory"))
		goto retry;
	killed = 1;
out:
	read_unlock(&tasklist_lock);

	/*
	 * Give "p" a good chance of killing itself before we
	 * retry to allocate memory unless "p" is current
	 */
	if (killed && !test_thread_flag(TIF_MEMDIE))
		schedule_timeout_uninterruptible(1);
}
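
/*
 * Call-path note: for global OOMs the page allocator reaches out_of_memory()
 * from __alloc_pages_may_oom() in mm/page_alloc.c, bracketed by
 * try_set_zonelist_oom() and clear_zonelist_oom() so that only one OOM kill
 * is attempted per zonelist at a time.
 */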

/*
 * The pagefault handler calls here because it is out of memory, so kill a
 * memory-hogging task.  If a populated zone has ZONE_OOM_LOCKED set, a parallel
 * oom killing is already in progress so do nothing.  If a task is found with
 * TIF_MEMDIE set, it has been killed so do nothing and allow it to exit.
 */
void pagefault_out_of_memory(void)
{
	if (try_set_system_oom()) {
		out_of_memory(NULL, 0, 0, NULL);
		clear_system_oom();
	}
	if (!test_thread_flag(TIF_MEMDIE))
		schedule_timeout_uninterruptible(1);
}