/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *  Copyright (C)  2010  Google, Inc.
 *	Rewritten by David Rientjes
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers. It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>
#include <linux/ratelimit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>

int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;
static DEFINE_SPINLOCK(zone_scan_lock);

/*
 * compare_swap_oom_score_adj() - compare and swap current's oom_score_adj
 * @old_val: old oom_score_adj for compare
 * @new_val: new oom_score_adj for swap
 *
 * Sets the oom_score_adj value for current to @new_val iff its present value is
 * @old_val.  Usually used to reinstate a previous value to prevent racing with
 * userspace tuning the value in the interim.
 */
void compare_swap_oom_score_adj(int old_val, int new_val)
{
	struct sighand_struct *sighand = current->sighand;

	spin_lock_irq(&sighand->siglock);
	if (current->signal->oom_score_adj == old_val)
		current->signal->oom_score_adj = new_val;
	trace_oom_score_adj_update(current);
	spin_unlock_irq(&sighand->siglock);
}

/**
 * test_set_oom_score_adj() - set current's oom_score_adj and return old value
 * @new_val: new oom_score_adj value
 *
 * Sets the oom_score_adj value for current to @new_val with proper
 * synchronization and returns the old value.  Usually used to temporarily
 * set a value, save the old value in the caller, and then reinstate it later.
 */
int test_set_oom_score_adj(int new_val)
{
	struct sighand_struct *sighand = current->sighand;
	int old_val;

	spin_lock_irq(&sighand->siglock);
	old_val = current->signal->oom_score_adj;
	current->signal->oom_score_adj = new_val;
	trace_oom_score_adj_update(current);
	spin_unlock_irq(&sighand->siglock);

	return old_val;
}

#ifdef CONFIG_NUMA
/**
 * has_intersects_mems_allowed() - check task eligibility for kill
 * @tsk: task struct of which task to consider
 * @mask: nodemask passed to page allocator for mempolicy ooms
 *
 * Task eligibility is determined by whether or not a candidate task, @tsk,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 */
static bool has_intersects_mems_allowed(struct task_struct *tsk,
					const nodemask_t *mask)
{
	struct task_struct *start = tsk;

	do {
		if (mask) {
			/*
			 * If this is a mempolicy constrained oom, tsk's
			 * cpuset is irrelevant.  Only return true if its
			 * mempolicy intersects current, otherwise it may be
			 * needlessly killed.
			 */
			if (mempolicy_nodemask_intersects(tsk, mask))
				return true;
		} else {
			/*
			 * This is not a mempolicy constrained oom, so only
			 * check the mems of tsk's cpuset.
			 */
			if (cpuset_mems_allowed_intersects(current, tsk))
				return true;
		}
	} while_each_thread(start, tsk);

	return false;
}
#else
static bool has_intersects_mems_allowed(struct task_struct *tsk,
					const nodemask_t *mask)
{
	return true;
}
#endif /* CONFIG_NUMA */

/*
 * The process p may have detached its own ->mm while exiting or through
 * use_mm(), but one or more of its subthreads may still have a valid
 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t = p;

	do {
		task_lock(t);
		if (likely(t->mm))
			return t;
		task_unlock(t);
	} while_each_thread(p, t);

	return NULL;
}
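
/*
 * Sketch of the expected calling convention for find_lock_task_mm()
 * (illustrative, mirroring its uses below): the returned thread is
 * task_lock()ed, and the caller must unlock it itself:
 *
 *	p = find_lock_task_mm(p);
 *	if (p) {
 *		... p->mm may be dereferenced safely here ...
 *		task_unlock(p);
 *	}
 */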

/* return true if the task is not adequate as candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p,
		const struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
	if (is_global_init(p))
		return true;
	if (p->flags & PF_KTHREAD)
		return true;

	/* When mem_cgroup_out_of_memory() is called and p is not a member of the group */
	if (memcg && !task_in_mem_cgroup(p, memcg))
		return true;

	/* p may not have freeable memory in nodemask */
	if (!has_intersects_mems_allowed(p, nodemask))
		return true;

	return false;
}

/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of the task whose badness we should calculate
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible.  The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
			  const nodemask_t *nodemask, unsigned long totalpages)
{
	long points;
	long adj;

	if (oom_unkillable_task(p, memcg, nodemask))
		return 0;

	p = find_lock_task_mm(p);
	if (!p)
		return 0;

	adj = p->signal->oom_score_adj;
	if (adj == OOM_SCORE_ADJ_MIN) {
		task_unlock(p);
		return 0;
	}

	/*
	 * The baseline for the badness score is the proportion of RAM that each
	 * task's rss, pagetable and swap space use.
	 */
	points = get_mm_rss(p->mm) + p->mm->nr_ptes +
		 get_mm_counter(p->mm, MM_SWAPENTS);
	task_unlock(p);

	/*
	 * Root processes get 3% bonus, just like the __vm_enough_memory()
	 * implementation used by LSMs.
	 */
	if (has_capability_noaudit(p, CAP_SYS_ADMIN))
		adj -= 30;

	/* Normalize to oom_score_adj units */
	adj *= totalpages / 1000;
	points += adj;
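
	/*
	 * Worked example of the arithmetic above (illustrative numbers):
	 * with totalpages = 4194304 (16GB of 4KB pages, no swap), a task
	 * whose rss + nr_ptes + swapents totals 1048576 pages and whose
	 * oom_score_adj is 100 scores
	 *
	 *	points = 1048576 + 100 * (4194304 / 1000) = 1467976
	 *
	 * so each oom_score_adj unit shifts the score by roughly 0.1% of
	 * allowed memory.
	 */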

	/*
	 * Never return 0 for an eligible task regardless of the root bonus and
	 * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
	 */
	return points > 0 ? points : 1;
}

/*
 * Determine the type of allocation constraint.
 */
#ifdef CONFIG_NUMA
static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
				gfp_t gfp_mask, nodemask_t *nodemask,
				unsigned long *totalpages)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	bool cpuset_limited = false;
	int nid;

	/* Default to all available memory */
	*totalpages = totalram_pages + total_swap_pages;

	if (!zonelist)
		return CONSTRAINT_NONE;
	/*
	 * Reach here only when __GFP_NOFAIL is used.  So we should avoid
	 * killing current; a random task kill is required in this case.
	 * Ideally this would be CONSTRAINT_THISNODE, but there is no way
	 * to handle that yet.
	 */
	if (gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;

	/*
	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
	 * the page allocator means a mempolicy is in effect.  Cpuset policy
	 * is enforced in get_page_from_freelist().
	 */
	if (nodemask && !nodes_subset(node_states[N_HIGH_MEMORY], *nodemask)) {
		*totalpages = total_swap_pages;
		for_each_node_mask(nid, *nodemask)
			*totalpages += node_spanned_pages(nid);
		return CONSTRAINT_MEMORY_POLICY;
	}

	/* Check if this allocation failure is caused by cpuset's wall function */
	for_each_zone_zonelist_nodemask(zone, z, zonelist,
			high_zoneidx, nodemask)
		if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
			cpuset_limited = true;

	if (cpuset_limited) {
		*totalpages = total_swap_pages;
		for_each_node_mask(nid, cpuset_current_mems_allowed)
			*totalpages += node_spanned_pages(nid);
		return CONSTRAINT_CPUSET;
	}
	return CONSTRAINT_NONE;
}
#else
static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
				gfp_t gfp_mask, nodemask_t *nodemask,
				unsigned long *totalpages)
{
	*totalpages = totalram_pages + total_swap_pages;
	return CONSTRAINT_NONE;
}
#endif
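
/*
 * Summary of the decision above (derived from constrained_alloc()):
 *
 *	CONSTRAINT_MEMORY_POLICY - the nodemask excludes some node with
 *				   memory; badness is scaled to the
 *				   mempolicy nodes plus swap
 *	CONSTRAINT_CPUSET	 - a cpuset wall blocked the allocation;
 *				   scaled to cpuset_current_mems_allowed
 *				   plus swap
 *	CONSTRAINT_NONE		 - anything else; scaled to all RAM plus
 *				   swap
 */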

enum oom_scan_t {
	OOM_SCAN_OK,		/* scan thread and find its badness */
	OOM_SCAN_CONTINUE,	/* do not consider thread for oom kill */
	OOM_SCAN_ABORT,		/* abort the iteration and return */
	OOM_SCAN_SELECT,	/* always select this thread first */
};

static enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
		struct mem_cgroup *memcg, unsigned long totalpages,
		const nodemask_t *nodemask, bool force_kill)
{
	if (task->exit_state)
		return OOM_SCAN_CONTINUE;
	if (oom_unkillable_task(task, memcg, nodemask))
		return OOM_SCAN_CONTINUE;

	/*
	 * This task already has access to memory reserves and is being killed.
	 * Don't allow any other task to have access to the reserves.
	 */
	if (test_tsk_thread_flag(task, TIF_MEMDIE)) {
		if (unlikely(frozen(task)))
			__thaw_task(task);
		if (!force_kill)
			return OOM_SCAN_ABORT;
	}
	if (!task->mm)
		return OOM_SCAN_CONTINUE;

	if (task->flags & PF_EXITING) {
		/*
		 * If task is current and is in the process of releasing memory,
		 * allow the "kill" to set TIF_MEMDIE, which will allow it to
		 * access memory reserves.  Otherwise, it may stall forever.
		 *
		 * The iteration isn't broken here, however, in case other
		 * threads are found to have already been oom killed.
		 */
		if (task == current)
			return OOM_SCAN_SELECT;
		else if (!force_kill) {
			/*
			 * If this task is not being ptraced on exit, then wait
			 * for it to finish before killing some other task
			 * unnecessarily.
			 */
			if (!(task->group_leader->ptrace & PT_TRACE_EXIT))
				return OOM_SCAN_ABORT;
		}
	}
	return OOM_SCAN_OK;
}
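
/*
 * Summary of the scan decisions above:
 *
 *	already exiting, unkillable, or no ->mm  -> OOM_SCAN_CONTINUE
 *	TIF_MEMDIE set and !force_kill           -> OOM_SCAN_ABORT
 *	PF_EXITING and task == current           -> OOM_SCAN_SELECT
 *	PF_EXITING, not ptraced, !force_kill     -> OOM_SCAN_ABORT
 *	everything else                          -> OOM_SCAN_OK
 */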

/*
 * Simple selection loop. We choose the process with the highest
 * number of 'points'. We expect the caller will lock the tasklist.
 *
 * (not docbooked, we don't want this one cluttering up the manual)
 */
static struct task_struct *select_bad_process(unsigned int *ppoints,
		unsigned long totalpages, struct mem_cgroup *memcg,
		const nodemask_t *nodemask, bool force_kill)
{
	struct task_struct *g, *p;
	struct task_struct *chosen = NULL;
	unsigned long chosen_points = 0;

	do_each_thread(g, p) {
		unsigned int points;

		switch (oom_scan_process_thread(p, memcg, totalpages, nodemask,
						force_kill)) {
		case OOM_SCAN_SELECT:
			chosen = p;
			chosen_points = ULONG_MAX;
			/* fall through */
		case OOM_SCAN_CONTINUE:
			continue;
		case OOM_SCAN_ABORT:
			return ERR_PTR(-1UL);
		case OOM_SCAN_OK:
			break;
		}
		points = oom_badness(p, memcg, nodemask, totalpages);
		if (points > chosen_points) {
			chosen = p;
			chosen_points = points;
		}
	} while_each_thread(g, p);

	*ppoints = chosen_points * 1000 / totalpages;
	return chosen;
}

/**
 * dump_tasks - dump current memory state of all system tasks
 * @memcg: current's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 *
 * Dumps the current memory state of all eligible tasks.  Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss, nr_ptes,
 * swapents, oom_score_adj value, and name.
 *
 * Call with tasklist_lock read-locked.
 */
static void dump_tasks(const struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
	struct task_struct *p;
	struct task_struct *task;

	pr_info("[ pid ]   uid  tgid total_vm      rss nr_ptes swapents oom_score_adj name\n");
	for_each_process(p) {
		if (oom_unkillable_task(p, memcg, nodemask))
			continue;

		task = find_lock_task_mm(p);
		if (!task) {
			/*
			 * This is a kthread or all of p's threads have already
			 * detached their mm's.  There's no need to report
			 * them; they can't be oom killed anyway.
			 */
			continue;
		}

		pr_info("[%5d] %5d %5d %8lu %8lu %7lu %8lu         %5d %s\n",
			task->pid, from_kuid(&init_user_ns, task_uid(task)),
			task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
			task->mm->nr_ptes,
			get_mm_counter(task->mm, MM_SWAPENTS),
			task->signal->oom_score_adj, task->comm);
		task_unlock(task);
	}
}
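
/*
 * Example of the resulting report (values are purely illustrative):
 *
 *	[ pid ]   uid  tgid total_vm      rss nr_ptes swapents oom_score_adj name
 *	[ 1523]  1000  1523   514520    12264      89      312             0 firefox
 */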

static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
			struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
	task_lock(current);
	pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
		"oom_adj=%d, oom_score_adj=%d\n",
		current->comm, gfp_mask, order, current->signal->oom_adj,
		current->signal->oom_score_adj);
	cpuset_print_task_mems_allowed(current);
	task_unlock(current);
	dump_stack();
	mem_cgroup_print_oom_info(memcg, p);
	show_mem(SHOW_MEM_FILTER_NODES);
	if (sysctl_oom_dump_tasks)
		dump_tasks(memcg, nodemask);
}

#define K(x) ((x) << (PAGE_SHIFT-10))
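/* K() above converts a page count to KiB: a page is 2^(PAGE_SHIFT - 10) KiB */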
static void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
			     unsigned int points, unsigned long totalpages,
			     struct mem_cgroup *memcg, nodemask_t *nodemask,
			     const char *message)
{
	struct task_struct *victim = p;
	struct task_struct *child;
	struct task_struct *t = p;
	struct mm_struct *mm;
	unsigned int victim_points = 0;
	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
					      DEFAULT_RATELIMIT_BURST);

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just set TIF_MEMDIE so it can die quickly
	 */
	if (p->flags & PF_EXITING) {
		set_tsk_thread_flag(p, TIF_MEMDIE);
		return;
	}

	if (__ratelimit(&oom_rs))
		dump_header(p, gfp_mask, order, memcg, nodemask);

	task_lock(p);
	pr_err("%s: Kill process %d (%s) score %d or sacrifice child\n",
		message, task_pid_nr(p), p->comm, points);
	task_unlock(p);

	/*
	 * If any of p's children has a different mm and is eligible for kill,
	 * the one with the highest oom_badness() score is sacrificed for its
	 * parent.  This attempts to lose the minimal amount of work done while
	 * still freeing memory.
	 */
	do {
		list_for_each_entry(child, &t->children, sibling) {
			unsigned int child_points;

			if (child->mm == p->mm)
				continue;
			/*
			 * oom_badness() returns 0 if the thread is unkillable
			 */
			child_points = oom_badness(child, memcg, nodemask,
								totalpages);
			if (child_points > victim_points) {
				victim = child;
				victim_points = child_points;
			}
		}
	} while_each_thread(p, t);

	victim = find_lock_task_mm(victim);
	if (!victim)
		return;

	/* mm cannot safely be dereferenced after task_unlock(victim) */
	mm = victim->mm;
	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
		task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
		K(get_mm_counter(victim->mm, MM_ANONPAGES)),
		K(get_mm_counter(victim->mm, MM_FILEPAGES)));
	task_unlock(victim);

	/*
	 * Kill all user processes sharing victim->mm in other thread groups, if
	 * any.  They don't get access to memory reserves, though, to avoid
	 * depletion of all memory.  This prevents mm->mmap_sem livelock when an
	 * oom killed thread cannot exit because it requires the semaphore and
	 * it's contended by another thread trying to allocate memory itself.
	 * That thread will now get access to memory reserves since it has a
	 * pending fatal signal.
	 */
	for_each_process(p)
		if (p->mm == mm && !same_thread_group(p, victim) &&
		    !(p->flags & PF_KTHREAD)) {
			if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
				continue;

			task_lock(p);	/* Protect ->comm from prctl() */
			pr_err("Kill process %d (%s) sharing same memory\n",
				task_pid_nr(p), p->comm);
			task_unlock(p);
			do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true);
		}

	set_tsk_thread_flag(victim, TIF_MEMDIE);
	do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true);
}
#undef K

/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
				int order, const nodemask_t *nodemask)
{
	if (likely(!sysctl_panic_on_oom))
		return;
	if (sysctl_panic_on_oom != 2) {
		/*
		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
		 * does not panic for cpuset, mempolicy, or memcg allocation
		 * failures.
		 */
		if (constraint != CONSTRAINT_NONE)
			return;
	}
	read_lock(&tasklist_lock);
	dump_header(NULL, gfp_mask, order, NULL, nodemask);
	read_unlock(&tasklist_lock);
	panic("Out of memory: %s panic_on_oom is enabled\n",
		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}
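
/*
 * Semantics of the sysctl, as implemented above:
 *
 *	panic_on_oom == 0 - never panic (the default)
 *	panic_on_oom == 1 - panic only for system-wide (CONSTRAINT_NONE) ooms
 *	panic_on_oom == 2 - always panic, even for cpuset, mempolicy, and
 *			    memcg-constrained ooms
 */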

#ifdef CONFIG_MEMCG
void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
			      int order)
{
	unsigned long limit;
	unsigned int points = 0;
	struct task_struct *p;

	/*
	 * If current has a pending SIGKILL, then automatically select it.  The
	 * goal is to allow it to allocate so that it may quickly exit and free
	 * its memory.
	 */
	if (fatal_signal_pending(current)) {
		set_thread_flag(TIF_MEMDIE);
		return;
	}

	check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
	limit = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1;
	read_lock(&tasklist_lock);
	p = select_bad_process(&points, limit, memcg, NULL, false);
	if (p && PTR_ERR(p) != -1UL)
		oom_kill_process(p, gfp_mask, order, points, limit, memcg, NULL,
				 "Memory cgroup out of memory");
	read_unlock(&tasklist_lock);
}
#endif

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);
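
/*
 * Minimal sketch of a client of the notifier chain above (hypothetical
 * code, not part of this file; drain_my_cache() is an assumed helper).
 * The callback may add the number of pages it freed to *parm, which
 * out_of_memory() checks before killing anything:
 *
 *	static int my_oom_notify(struct notifier_block *nb,
 *				 unsigned long dummy, void *parm)
 *	{
 *		unsigned long *freed = parm;
 *
 *		*freed += drain_my_cache();
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_oom_nb = {
 *		.notifier_call = my_oom_notify,
 *	};
 *
 *	register_oom_notifier(&my_oom_nb);
 */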

/*
 * Try to acquire the OOM killer lock for the zones in zonelist.  Returns zero
 * if a parallel OOM killing is already taking place that includes a zone in
 * the zonelist.  Otherwise, locks all zones in the zonelist and returns 1.
 */
int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
	struct zoneref *z;
	struct zone *zone;
	int ret = 1;

	spin_lock(&zone_scan_lock);
	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		if (zone_is_oom_locked(zone)) {
			ret = 0;
			goto out;
		}
	}

	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		/*
		 * Lock each zone in the zonelist under zone_scan_lock so a
		 * parallel invocation of try_set_zonelist_oom() doesn't succeed
		 * when it shouldn't.
		 */
		zone_set_flag(zone, ZONE_OOM_LOCKED);
	}

out:
	spin_unlock(&zone_scan_lock);
	return ret;
}

/*
 * Clears the ZONE_OOM_LOCKED flag for all zones in the zonelist so that failed
 * allocation attempts with zonelists containing them may now recall the OOM
 * killer, if necessary.
 */
void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
	struct zoneref *z;
	struct zone *zone;

	spin_lock(&zone_scan_lock);
	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		zone_clear_flag(zone, ZONE_OOM_LOCKED);
	}
	spin_unlock(&zone_scan_lock);
}
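
/*
 * Sketch of the calling pattern assumed for the pair above (the real
 * caller lives in mm/page_alloc.c):
 *
 *	if (try_set_zonelist_oom(zonelist, gfp_mask)) {
 *		out_of_memory(zonelist, gfp_mask, order, nodemask, false);
 *		clear_zonelist_oom(zonelist, gfp_mask);
 *	}
 */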

/*
 * Try to acquire the oom killer lock for all system zones.  Returns zero if a
 * parallel oom killing is taking place, otherwise locks all zones and returns
 * non-zero.
 */
static int try_set_system_oom(void)
{
	struct zone *zone;
	int ret = 1;

	spin_lock(&zone_scan_lock);
	for_each_populated_zone(zone)
		if (zone_is_oom_locked(zone)) {
			ret = 0;
			goto out;
		}
	for_each_populated_zone(zone)
		zone_set_flag(zone, ZONE_OOM_LOCKED);
out:
	spin_unlock(&zone_scan_lock);
	return ret;
}

/*
 * Clears ZONE_OOM_LOCKED for all system zones so that failed allocation
 * attempts or page faults may now recall the oom killer, if necessary.
 */
static void clear_system_oom(void)
{
	struct zone *zone;

	spin_lock(&zone_scan_lock);
	for_each_populated_zone(zone)
		zone_clear_flag(zone, ZONE_OOM_LOCKED);
	spin_unlock(&zone_scan_lock);
}

/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @zonelist: zonelist pointer
 * @gfp_mask: memory allocation flags
 * @order: amount of memory being requested as a power of 2
 * @nodemask: nodemask passed to page allocator
 * @force_kill: true if a task must be killed, even if others are exiting
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse),
 * or trying to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
		int order, nodemask_t *nodemask, bool force_kill)
{
	const nodemask_t *mpol_mask;
	struct task_struct *p;
	unsigned long totalpages;
	unsigned long freed = 0;
	unsigned int points;
	enum oom_constraint constraint = CONSTRAINT_NONE;
	int killed = 0;

	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
	if (freed > 0)
		/* Got some memory back in the last second. */
		return;

	/*
	 * If current has a pending SIGKILL, then automatically select it.  The
	 * goal is to allow it to allocate so that it may quickly exit and free
	 * its memory.
	 */
	if (fatal_signal_pending(current)) {
		set_thread_flag(TIF_MEMDIE);
		return;
	}

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA) that may require different handling.
	 */
	constraint = constrained_alloc(zonelist, gfp_mask, nodemask,
						&totalpages);
	mpol_mask = (constraint == CONSTRAINT_MEMORY_POLICY) ? nodemask : NULL;
	check_panic_on_oom(constraint, gfp_mask, order, mpol_mask);

	read_lock(&tasklist_lock);
	if (sysctl_oom_kill_allocating_task && current->mm &&
	    !oom_unkillable_task(current, NULL, nodemask) &&
	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
		oom_kill_process(current, gfp_mask, order, 0, totalpages, NULL,
				 nodemask,
				 "Out of memory (oom_kill_allocating_task)");
		goto out;
	}

	p = select_bad_process(&points, totalpages, NULL, mpol_mask,
			       force_kill);
	/* Found nothing?!?! Either we hang forever, or we panic. */
	if (!p) {
		dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
		read_unlock(&tasklist_lock);
		panic("Out of memory and no killable processes...\n");
	}
	if (PTR_ERR(p) != -1UL) {
		oom_kill_process(p, gfp_mask, order, points, totalpages, NULL,
				 nodemask, "Out of memory");
		killed = 1;
	}
out:
	read_unlock(&tasklist_lock);

	/*
	 * Give the killed threads a good chance of exiting before trying to
	 * allocate memory again.
	 */
	if (killed)
		schedule_timeout_killable(1);
}

/*
 * The pagefault handler calls here because it is out of memory, so kill a
 * memory-hogging task.  If a populated zone has ZONE_OOM_LOCKED set, a parallel
 * oom killing is already in progress so do nothing.  If a task is found with
 * TIF_MEMDIE set, it has been killed so do nothing and allow it to exit.
 */
void pagefault_out_of_memory(void)
{
	if (try_set_system_oom()) {
		out_of_memory(NULL, 0, 0, NULL, false);
		clear_system_oom();
	}
	schedule_timeout_killable(1);
}