/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
/*
 *  linux/mm/oom_kill.c
 * 
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *  Copyright (C)  2010  Google, Inc.
 *	Rewritten by David Rientjes
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers. It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/init.h>

#include <asm/tlb.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>

int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;

DEFINE_MUTEX(oom_lock);

#ifdef CONFIG_NUMA
/**
 * has_intersects_mems_allowed() - check task eligibility for kill
 * @start: task struct of which task to consider
 * @mask: nodemask passed to page allocator for mempolicy ooms
 *
 * Task eligibility is determined by whether or not a candidate task, @tsk,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 */
static bool has_intersects_mems_allowed(struct task_struct *start,
					const nodemask_t *mask)
{
	struct task_struct *tsk;
	bool ret = false;

	rcu_read_lock();
	for_each_thread(start, tsk) {
		if (mask) {
			/*
			 * If this is a mempolicy constrained oom, tsk's
			 * cpuset is irrelevant.  Only return true if its
			 * mempolicy intersects current, otherwise it may be
			 * needlessly killed.
			 */
			ret = mempolicy_nodemask_intersects(tsk, mask);
		} else {
			/*
			 * This is not a mempolicy constrained oom, so only
			 * check the mems of tsk's cpuset.
			 */
			ret = cpuset_mems_allowed_intersects(current, tsk);
		}
		if (ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}
#else
static bool has_intersects_mems_allowed(struct task_struct *tsk,
					const nodemask_t *mask)
{
	return true;
}
#endif /* CONFIG_NUMA */

/*
 * The process p may have detached its own ->mm while exiting or through
 * use_mm(), but one or more of its subthreads may still have a valid
 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t;

	rcu_read_lock();

	for_each_thread(p, t) {
		task_lock(t);
		if (likely(t->mm))
			goto found;
		task_unlock(t);
	}
	t = NULL;
found:
	rcu_read_unlock();

	return t;
}
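
/*
 * Illustrative sketch (not part of the kernel source): a typical caller
 * pattern for find_lock_task_mm().  The returned task, if any, has a valid
 * ->mm and is returned with task_lock() held, so the caller may safely
 * inspect the mm and must drop the lock afterwards:
 *
 *	struct task_struct *t = find_lock_task_mm(p);
 *	if (t) {
 *		unsigned long rss = get_mm_rss(t->mm);
 *		task_unlock(t);
 *	}
 */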

/*
 * order == -1 means the oom kill was requested via sysrq; any other order
 * is used only for display purposes.
 */
static inline bool is_sysrq_oom(struct oom_control *oc)
{
	return oc->order == -1;
}

/* Return true if the task is not a suitable candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p,
		struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
	if (is_global_init(p))
		return true;
	if (p->flags & PF_KTHREAD)
		return true;

	/* For mem_cgroup_out_of_memory(): skip p if it is not in the memcg */
	if (memcg && !task_in_mem_cgroup(p, memcg))
		return true;

	/* p may not have freeable memory in nodemask */
	if (!has_intersects_mems_allowed(p, nodemask))
		return true;

	return false;
}

/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of the task whose badness score we calculate
 * @memcg: task's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible.  The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
			  const nodemask_t *nodemask, unsigned long totalpages)
{
	long points;
	long adj;

	if (oom_unkillable_task(p, memcg, nodemask))
		return 0;

	p = find_lock_task_mm(p);
	if (!p)
		return 0;

	adj = (long)p->signal->oom_score_adj;
	if (adj == OOM_SCORE_ADJ_MIN) {
		task_unlock(p);
		return 0;
	}

	/*
	 * The baseline for the badness score is the proportion of RAM that each
	 * task's rss, pagetable and swap space use.
	 */
	points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
		atomic_long_read(&p->mm->nr_ptes) + mm_nr_pmds(p->mm);
	task_unlock(p);

	/*
	 * Root processes get 3% bonus, just like the __vm_enough_memory()
	 * implementation used by LSMs.
	 */
	if (has_capability_noaudit(p, CAP_SYS_ADMIN))
		points -= (points * 3) / 100;

	/* Normalize to oom_score_adj units */
	adj *= totalpages / 1000;
	points += adj;

	/*
	 * Never return 0 for an eligible task regardless of the root bonus and
	 * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
	 */
	return points > 0 ? points : 1;
}
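
/*
 * Worked example (illustrative, with assumed numbers): on a machine with
 * totalpages = 1,000,000 pages (~4GB with 4KB pages), a task whose rss,
 * swap entries and page tables sum to 100,000 pages starts at
 * points = 100,000.  An oom_score_adj of +500 then adds
 * 500 * (1,000,000 / 1000) = 500,000 for a total of 600,000, while a root
 * task would first have had 3% (3,000 points) subtracted.  A task using
 * ~10% of allowed memory therefore scores roughly 100 plus its
 * oom_score_adj once the score is normalized to per-mille units below.
 */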

/*
 * Determine the type of allocation constraint.
 */
#ifdef CONFIG_NUMA
static enum oom_constraint constrained_alloc(struct oom_control *oc,
					     unsigned long *totalpages)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask);
	bool cpuset_limited = false;
	int nid;

	/* Default to all available memory */
	*totalpages = totalram_pages + total_swap_pages;

	if (!oc->zonelist)
		return CONSTRAINT_NONE;
	/*
	 * We reach here only when __GFP_NOFAIL is used, so we should avoid
	 * killing current; a random task has to be killed in this case.
	 * Ideally this would be CONSTRAINT_THISNODE, but there is no way to
	 * handle that for now.
	 */
	if (oc->gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;

	/*
	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
	 * the page allocator means a mempolicy is in effect.  Cpuset policy
	 * is enforced in get_page_from_freelist().
	 */
	if (oc->nodemask &&
	    !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
		*totalpages = total_swap_pages;
		for_each_node_mask(nid, *oc->nodemask)
			*totalpages += node_spanned_pages(nid);
		return CONSTRAINT_MEMORY_POLICY;
	}

	/* Check whether this allocation failure was caused by the cpuset's limits */
	for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
			high_zoneidx, oc->nodemask)
		if (!cpuset_zone_allowed(zone, oc->gfp_mask))
			cpuset_limited = true;

	if (cpuset_limited) {
		*totalpages = total_swap_pages;
		for_each_node_mask(nid, cpuset_current_mems_allowed)
			*totalpages += node_spanned_pages(nid);
		return CONSTRAINT_CPUSET;
	}
	return CONSTRAINT_NONE;
}
#else
static enum oom_constraint constrained_alloc(struct oom_control *oc,
					     unsigned long *totalpages)
{
	*totalpages = totalram_pages + total_swap_pages;
	return CONSTRAINT_NONE;
}
#endif
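
/*
 * Illustrative example (assumed configuration): on a two-node machine with
 * 4GB per node, an allocation bound by a mempolicy to node 1 only yields
 * CONSTRAINT_MEMORY_POLICY with *totalpages covering node 1's pages plus
 * swap, so badness scores are normalized against the memory the task could
 * actually use.  An unconstrained allocation on the same machine yields
 * CONSTRAINT_NONE with *totalpages = totalram_pages + total_swap_pages.
 */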

enum oom_scan_t oom_scan_process_thread(struct oom_control *oc,
			struct task_struct *task, unsigned long totalpages)
{
	if (oom_unkillable_task(task, NULL, oc->nodemask))
		return OOM_SCAN_CONTINUE;

	/*
	 * This task already has access to memory reserves and is being killed.
	 * Don't allow any other task to have access to the reserves.
	 */
	if (test_tsk_thread_flag(task, TIF_MEMDIE)) {
		if (!is_sysrq_oom(oc))
			return OOM_SCAN_ABORT;
	}
	if (!task->mm)
		return OOM_SCAN_CONTINUE;

	/*
	 * If task is allocating a lot of memory and has been marked to be
	 * killed first if it triggers an oom, then select it.
	 */
	if (oom_task_origin(task))
		return OOM_SCAN_SELECT;

	return OOM_SCAN_OK;
}

/*
 * Simple selection loop. We choose the process with the highest
 * number of 'points'.  Returns -1 on scan abort.
 */
static struct task_struct *select_bad_process(struct oom_control *oc,
		unsigned int *ppoints, unsigned long totalpages)
{
	struct task_struct *g, *p;
	struct task_struct *chosen = NULL;
	unsigned long chosen_points = 0;

	rcu_read_lock();
	for_each_process_thread(g, p) {
		unsigned int points;

		switch (oom_scan_process_thread(oc, p, totalpages)) {
		case OOM_SCAN_SELECT:
			chosen = p;
			chosen_points = ULONG_MAX;
			/* fall through */
		case OOM_SCAN_CONTINUE:
			continue;
		case OOM_SCAN_ABORT:
			rcu_read_unlock();
			return (struct task_struct *)(-1UL);
		case OOM_SCAN_OK:
			break;
		}
		points = oom_badness(p, NULL, oc->nodemask, totalpages);
		if (!points || points < chosen_points)
			continue;
		/* Prefer thread group leaders for display purposes */
		if (points == chosen_points && thread_group_leader(chosen))
			continue;

		chosen = p;
		chosen_points = points;
	}
	if (chosen)
		get_task_struct(chosen);
	rcu_read_unlock();

	*ppoints = chosen_points * 1000 / totalpages;
	return chosen;
}
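
/*
 * Note on the returned *ppoints (illustrative arithmetic): chosen_points is
 * scaled by 1000/totalpages, so a victim using half of all allowed memory
 * reports a score of roughly 500.  With chosen_points = 200,000 out of
 * totalpages = 1,000,000, *ppoints = 200,000 * 1000 / 1,000,000 = 200.
 */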

/**
 * dump_tasks - dump current memory state of all system tasks
 * @memcg: current's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 *
 * Dumps the current memory state of all eligible tasks.  Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss, nr_ptes,
 * swapents, oom_score_adj value, and name.
 */
static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
	struct task_struct *p;
	struct task_struct *task;

	pr_info("[ pid ]   uid  tgid total_vm      rss nr_ptes nr_pmds swapents oom_score_adj name\n");
	rcu_read_lock();
	for_each_process(p) {
		if (oom_unkillable_task(p, memcg, nodemask))
			continue;

		task = find_lock_task_mm(p);
		if (!task) {
			/*
			 * This is a kthread or all of p's threads have already
			 * detached their mm's.  There's no need to report
			 * them; they can't be oom killed anyway.
			 */
			continue;
		}

		pr_info("[%5d] %5d %5d %8lu %8lu %7ld %7ld %8lu         %5hd %s\n",
			task->pid, from_kuid(&init_user_ns, task_uid(task)),
			task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
			atomic_long_read(&task->mm->nr_ptes),
			mm_nr_pmds(task->mm),
			get_mm_counter(task->mm, MM_SWAPENTS),
			task->signal->oom_score_adj, task->comm);
		task_unlock(task);
	}
	rcu_read_unlock();
}
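
/*
 * A dump produced by dump_tasks() looks like the following (hypothetical
 * values; one line per eligible process):
 *
 *	[ pid ]   uid  tgid total_vm      rss nr_ptes nr_pmds swapents oom_score_adj name
 *	[ 1234]  1000  1234   210432    51200     150       4     1024             0 firefox
 */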

static void dump_header(struct oom_control *oc, struct task_struct *p,
			struct mem_cgroup *memcg)
{
	pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, oom_score_adj=%hd\n",
		current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order,
		current->signal->oom_score_adj);

	cpuset_print_current_mems_allowed();
	dump_stack();
	if (memcg)
		mem_cgroup_print_oom_info(memcg, p);
	else
		show_mem(SHOW_MEM_FILTER_NODES);
	if (sysctl_oom_dump_tasks)
		dump_tasks(memcg, oc->nodemask);
}

/*
 * Number of OOM victims in flight
 */
static atomic_t oom_victims = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);

bool oom_killer_disabled __read_mostly;

#define K(x) ((x) << (PAGE_SHIFT-10))
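/*
 * For example, with 4KB pages (PAGE_SHIFT == 12), K(x) shifts left by two
 * and thus converts a page count to kilobytes: K(256) == 1024kB.
 */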

/*
 * task->mm can be NULL if the task is the exited group leader.  So to
 * determine whether the task is using a particular mm, we examine all the
 * task's threads: if one of those is using this mm then this task was also
 * using it.
 */
static bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		struct mm_struct *t_mm = READ_ONCE(t->mm);
		if (t_mm)
			return t_mm == mm;
	}
	return false;
}


#ifdef CONFIG_MMU
/*
 * OOM Reaper kernel thread which tries to reap the memory used by the OOM
 * victim (if that is possible) to help the OOM killer to move on.
 */
static struct task_struct *oom_reaper_th;
static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
static struct task_struct *oom_reaper_list;
static DEFINE_SPINLOCK(oom_reaper_lock);

static bool __oom_reap_task(struct task_struct *tsk)
{
	struct mmu_gather tlb;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct task_struct *p;
	struct zap_details details = {.check_swap_entries = true,
				      .ignore_dirty = true};
	bool ret = true;

	/*
	 * Make sure we find the associated mm_struct even when the particular
	 * thread has already terminated and cleared its mm.
	 * We might race with the exit path, so consider our work done if there
	 * is no mm.
	 */
	p = find_lock_task_mm(tsk);
	if (!p)
		return true;

	mm = p->mm;
	if (!atomic_inc_not_zero(&mm->mm_users)) {
		task_unlock(p);
		return true;
	}

	task_unlock(p);

	if (!down_read_trylock(&mm->mmap_sem)) {
		ret = false;
		goto out;
	}

	tlb_gather_mmu(&tlb, mm, 0, -1);
	for (vma = mm->mmap ; vma; vma = vma->vm_next) {
		if (is_vm_hugetlb_page(vma))
			continue;

		/*
		 * mlocked VMAs require explicit munlocking before unmap.
		 * Let's keep it simple here and skip such VMAs.
		 */
		if (vma->vm_flags & VM_LOCKED)
			continue;

		/*
		 * Only anonymous pages have a good chance to be dropped
		 * without additional steps which we cannot afford as we
		 * are OOM already.
		 *
		 * We do not even care about fs backed pages because all
		 * which are reclaimable have already been reclaimed and
		 * we do not want to block exit_mmap by keeping mm ref
		 * count elevated without a good reason.
		 */
		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED))
			unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end,
					 &details);
	}
	tlb_finish_mmu(&tlb, 0, -1);
	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
			task_pid_nr(tsk), tsk->comm,
			K(get_mm_counter(mm, MM_ANONPAGES)),
			K(get_mm_counter(mm, MM_FILEPAGES)),
			K(get_mm_counter(mm, MM_SHMEMPAGES)));
	up_read(&mm->mmap_sem);

	/*
	 * Clear TIF_MEMDIE because the task shouldn't be sitting on
	 * reasonably reclaimable memory anymore. OOM killer can continue
	 * by selecting another victim if unmapping hasn't led to any
	 * improvements. This also means that selecting this task doesn't
	 * make any sense.
	 */
	tsk->signal->oom_score_adj = OOM_SCORE_ADJ_MIN;
	exit_oom_victim(tsk);
out:
	mmput(mm);
	return ret;
}

#define MAX_OOM_REAP_RETRIES 10
static void oom_reap_task(struct task_struct *tsk)
{
	int attempts = 0;

	/* Retry the down_read_trylock(mmap_sem) a few times */
	while (attempts++ < MAX_OOM_REAP_RETRIES && !__oom_reap_task(tsk))
		schedule_timeout_idle(HZ/10);

	if (attempts > MAX_OOM_REAP_RETRIES) {
		pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
				task_pid_nr(tsk), tsk->comm);
		debug_show_all_locks();
	}

	/* Drop a reference taken by wake_oom_reaper */
	put_task_struct(tsk);
}

static int oom_reaper(void *unused)
{
	set_freezable();

	while (true) {
		struct task_struct *tsk = NULL;

		wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
		spin_lock(&oom_reaper_lock);
		if (oom_reaper_list != NULL) {
			tsk = oom_reaper_list;
			oom_reaper_list = tsk->oom_reaper_list;
		}
		spin_unlock(&oom_reaper_lock);

		if (tsk)
			oom_reap_task(tsk);
	}

	return 0;
}

static void wake_oom_reaper(struct task_struct *tsk)
{
	if (!oom_reaper_th)
		return;

	/* tsk is already queued? */
	if (tsk == oom_reaper_list || tsk->oom_reaper_list)
		return;

	get_task_struct(tsk);

	spin_lock(&oom_reaper_lock);
	tsk->oom_reaper_list = oom_reaper_list;
	oom_reaper_list = tsk;
	spin_unlock(&oom_reaper_lock);
	wake_up(&oom_reaper_wait);
}
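
/*
 * Illustrative note: oom_reaper_list is a simple LIFO, singly linked through
 * task_struct->oom_reaper_list.  wake_oom_reaper() pushes a victim (taking a
 * task reference), and the oom_reaper thread pops one entry per wakeup;
 * oom_reap_task() drops the reference when it is done.
 */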

/*
 * Check if we can reap the given task. This has to be called with a stable
 * tsk->mm.
 */
void try_oom_reaper(struct task_struct *tsk)
{
	struct mm_struct *mm = tsk->mm;
	struct task_struct *p;

	if (!mm)
		return;

	/*
	 * There might be other threads/processes which are either not
	 * dying or even not killable.
	 */
	if (atomic_read(&mm->mm_users) > 1) {
		rcu_read_lock();
		for_each_process(p) {
			bool exiting;

			if (!process_shares_mm(p, mm))
				continue;
			if (same_thread_group(p, tsk))
				continue;
			if (fatal_signal_pending(p))
				continue;

			/*
			 * If the task is exiting, make sure the whole thread
			 * group is exiting and cannot access the mm anymore.
			 */
			spin_lock_irq(&p->sighand->siglock);
			exiting = signal_group_exit(p->signal);
			spin_unlock_irq(&p->sighand->siglock);
			if (exiting)
				continue;

			/* Give up */
			rcu_read_unlock();
			return;
		}
		rcu_read_unlock();
	}

	wake_oom_reaper(tsk);
}

static int __init oom_init(void)
{
	oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
	if (IS_ERR(oom_reaper_th)) {
		pr_err("Unable to start OOM reaper %ld. Continuing regardless\n",
				PTR_ERR(oom_reaper_th));
		oom_reaper_th = NULL;
	}
	return 0;
}
subsys_initcall(oom_init)
#else
static void wake_oom_reaper(struct task_struct *tsk)
{
}
#endif

/**
 * mark_oom_victim - mark the given task as OOM victim
 * @tsk: task to mark
 *
 * Has to be called with oom_lock held and never after
 * oom has been disabled already.
 */
void mark_oom_victim(struct task_struct *tsk)
{
	WARN_ON(oom_killer_disabled);
	/* OOM killer might race with memcg OOM */
	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
		return;
	/*
	 * Make sure that the task is woken up from uninterruptible sleep
	 * if it is frozen, because the OOM killer wouldn't be able to free
	 * any memory and would livelock. freezing_slow_path() will tell the
	 * freezer that TIF_MEMDIE tasks should be ignored.
	 */
	__thaw_task(tsk);
	atomic_inc(&oom_victims);
}

/**
 * exit_oom_victim - note the exit of an OOM victim
 */
void exit_oom_victim(struct task_struct *tsk)
{
	if (!test_and_clear_tsk_thread_flag(tsk, TIF_MEMDIE))
		return;

	if (!atomic_dec_return(&oom_victims))
		wake_up_all(&oom_victims_wait);
}

/**
 * oom_killer_disable - disable OOM killer
 *
 * Forces all page allocations to fail rather than trigger OOM killer.
 * Will block and wait until all OOM victims are killed.
 *
 * The function cannot be called when there are runnable user tasks because
 * userspace would see unexpected allocation failures as a result. Any
 * new usage of this function should be discussed with MM people.
 *
 * Returns true if successful and false if the OOM killer cannot be
 * disabled.
 */
bool oom_killer_disable(void)
{
	/*
	 * Make sure not to race with an ongoing OOM killer. Check that
	 * current is not killed (possibly due to sharing the victim's memory).
	 */
	if (mutex_lock_killable(&oom_lock))
		return false;
	oom_killer_disabled = true;
	mutex_unlock(&oom_lock);

	wait_event(oom_victims_wait, !atomic_read(&oom_victims));

	return true;
}

/**
 * oom_killer_enable - enable OOM killer
 */
void oom_killer_enable(void)
{
	oom_killer_disabled = false;
}
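
/*
 * Illustrative pairing (a sketch; the freezer/suspend path is the expected
 * caller): disable before freezing user space, re-enable on thaw.
 *
 *	if (!oom_killer_disable())
 *		return -EBUSY;	(interrupted while waiting for victims)
 *	...allocations now fail instead of invoking the OOM killer...
 *	oom_killer_enable();
 */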

/*
 * Must be called while holding a reference to p, which will be released upon
 * returning.
 */
void oom_kill_process(struct oom_control *oc, struct task_struct *p,
		      unsigned int points, unsigned long totalpages,
		      struct mem_cgroup *memcg, const char *message)
{
	struct task_struct *victim = p;
	struct task_struct *child;
	struct task_struct *t;
	struct mm_struct *mm;
	unsigned int victim_points = 0;
	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
					      DEFAULT_RATELIMIT_BURST);
	bool can_oom_reap = true;

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just set TIF_MEMDIE so it can die quickly
	 */
	task_lock(p);
	if (p->mm && task_will_free_mem(p)) {
		mark_oom_victim(p);
		try_oom_reaper(p);
		task_unlock(p);
		put_task_struct(p);
		return;
	}
	task_unlock(p);

	if (__ratelimit(&oom_rs))
		dump_header(oc, p, memcg);

	pr_err("%s: Kill process %d (%s) score %u or sacrifice child\n",
		message, task_pid_nr(p), p->comm, points);

	/*
	 * If any of p's children has a different mm and is eligible for kill,
	 * the one with the highest oom_badness() score is sacrificed for its
	 * parent.  This attempts to lose the minimal amount of work done while
	 * still freeing memory.
	 */
	read_lock(&tasklist_lock);
	for_each_thread(p, t) {
		list_for_each_entry(child, &t->children, sibling) {
			unsigned int child_points;

			if (process_shares_mm(child, p->mm))
				continue;
			/*
			 * oom_badness() returns 0 if the thread is unkillable
			 */
			child_points = oom_badness(child, memcg, oc->nodemask,
								totalpages);
			if (child_points > victim_points) {
				put_task_struct(victim);
				victim = child;
				victim_points = child_points;
				get_task_struct(victim);
			}
		}
	}
	read_unlock(&tasklist_lock);

	p = find_lock_task_mm(victim);
	if (!p) {
		put_task_struct(victim);
		return;
	} else if (victim != p) {
		get_task_struct(p);
		put_task_struct(victim);
		victim = p;
	}

	/* Get a reference to safely compare mm after task_unlock(victim) */
	mm = victim->mm;
	atomic_inc(&mm->mm_count);
	/*
	 * We should send SIGKILL before setting TIF_MEMDIE in order to prevent
	 * the OOM victim from depleting the memory reserves from the user
	 * space under its control.
	 */
	do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true);
	mark_oom_victim(victim);
	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
		task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
		K(get_mm_counter(victim->mm, MM_ANONPAGES)),
		K(get_mm_counter(victim->mm, MM_FILEPAGES)),
		K(get_mm_counter(victim->mm, MM_SHMEMPAGES)));
	task_unlock(victim);

	/*
	 * Kill all user processes sharing victim->mm in other thread groups, if
	 * any.  They don't get access to memory reserves, though, to avoid
	 * depletion of all memory.  This prevents mm->mmap_sem livelock when an
	 * oom killed thread cannot exit because it requires the semaphore and
	 * it's contended by another thread trying to allocate memory itself.
	 * That thread will now get access to memory reserves since it has a
	 * pending fatal signal.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(p, victim))
			continue;
		if (unlikely(p->flags & PF_KTHREAD) || is_global_init(p) ||
		    p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
			/*
			 * We cannot use oom_reaper for the mm shared by this
			 * process because it wouldn't get killed and so the
			 * memory might be still used.
			 */
			can_oom_reap = false;
			continue;
		}
		do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true);
	}
	rcu_read_unlock();

	if (can_oom_reap)
		wake_oom_reaper(victim);

	mmdrop(mm);
	put_task_struct(victim);
}
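
/*
 * Summary of the kill sequence above (descriptive only): 1) bail out early
 * if the victim is already exiting, 2) optionally sacrifice the child with
 * the highest badness instead of the parent, 3) SIGKILL the victim and mark
 * it with TIF_MEMDIE, 4) SIGKILL every other process sharing the mm, and
 * 5) hand the mm to the oom_reaper unless an unkillable task shares it.
 */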
#undef K

/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
void check_panic_on_oom(struct oom_control *oc, enum oom_constraint constraint,
			struct mem_cgroup *memcg)
{
	if (likely(!sysctl_panic_on_oom))
		return;
	if (sysctl_panic_on_oom != 2) {
		/*
		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
		 * does not panic for cpuset, mempolicy, or memcg allocation
		 * failures.
		 */
		if (constraint != CONSTRAINT_NONE)
			return;
	}
	/* Do not panic for oom kills triggered by sysrq */
	if (is_sysrq_oom(oc))
		return;
	dump_header(oc, NULL, memcg);
	panic("Out of memory: %s panic_on_oom is enabled\n",
		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);

/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @oc: pointer to struct oom_control
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR try to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
bool out_of_memory(struct oom_control *oc)
{
	struct task_struct *p;
	unsigned long totalpages;
	unsigned long freed = 0;
	unsigned int uninitialized_var(points);
	enum oom_constraint constraint = CONSTRAINT_NONE;

	if (oom_killer_disabled)
		return false;

	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
	if (freed > 0)
		/* Got some memory back in the last second. */
		return true;

	/*
	 * If current has a pending SIGKILL or is exiting, then automatically
	 * select it.  The goal is to allow it to allocate so that it may
	 * quickly exit and free its memory.
	 *
	 * But don't select if current has already released its mm and cleared
	 * TIF_MEMDIE flag at exit_mm(), otherwise an OOM livelock may occur.
	 */
	if (current->mm &&
	    (fatal_signal_pending(current) || task_will_free_mem(current))) {
		mark_oom_victim(current);
		try_oom_reaper(current);
		return true;
	}

	/*
	 * The OOM killer does not compensate for IO-less reclaim.
	 * pagefault_out_of_memory() lost its gfp context, so we have to make
	 * sure to exclude the 0 mask; all other users should have at least
	 * ___GFP_DIRECT_RECLAIM to get here.
	 */
	if (oc->gfp_mask && !(oc->gfp_mask & (__GFP_FS|__GFP_NOFAIL)))
		return true;

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA) that may require different handling.
	 */
	constraint = constrained_alloc(oc, &totalpages);
	if (constraint != CONSTRAINT_MEMORY_POLICY)
		oc->nodemask = NULL;
	check_panic_on_oom(oc, constraint, NULL);

	if (sysctl_oom_kill_allocating_task && current->mm &&
	    !oom_unkillable_task(current, NULL, oc->nodemask) &&
	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
		get_task_struct(current);
		oom_kill_process(oc, current, 0, totalpages, NULL,
				 "Out of memory (oom_kill_allocating_task)");
		return true;
	}

	p = select_bad_process(oc, &points, totalpages);
	/* Found nothing?!?! Either we hang forever, or we panic. */
	if (!p && !is_sysrq_oom(oc)) {
		dump_header(oc, NULL, NULL);
		panic("Out of memory and no killable processes...\n");
	}
	if (p && p != (void *)-1UL) {
		oom_kill_process(oc, p, points, totalpages, NULL,
				 "Out of memory");
		/*
		 * Give the killed process a good chance to exit before trying
		 * to allocate memory again.
		 */
		schedule_timeout_killable(1);
	}
	return true;
}

/*
 * The pagefault handler calls here because it is out of memory, so kill a
 * memory-hogging task.  If any populated zone has ZONE_OOM_LOCKED set, a
 * parallel oom killing is already in progress so do nothing.
 */
void pagefault_out_of_memory(void)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.gfp_mask = 0,
		.order = 0,
	};

	if (mem_cgroup_oom_synchronize(true))
		return;

	if (!mutex_trylock(&oom_lock))
		return;

	if (!out_of_memory(&oc)) {
		/*
		 * There shouldn't be any user tasks runnable while the
		 * OOM killer is disabled, so the current task has to
		 * be a racing OOM victim which oom_killer_disable()
		 * is waiting for.
		 */
		WARN_ON(test_thread_flag(TIF_MEMDIE));
	}

	mutex_unlock(&oom_lock);
}