/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *  Copyright (C)  2010  Google, Inc.
 *	Rewritten by David Rientjes
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers. It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/init.h>

#include <asm/tlb.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>

int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;

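/* Serializes OOM killer invocations; also taken by the oom reaper and oom_killer_disable(). */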
DEFINE_MUTEX(oom_lock);

#ifdef CONFIG_NUMA
/**
 * has_intersects_mems_allowed() - check task eligibility for kill
 * @start: task struct of the task to consider
 * @mask: nodemask passed to page allocator for mempolicy ooms
 *
 * Task eligibility is determined by whether or not a candidate task, @tsk,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 */
static bool has_intersects_mems_allowed(struct task_struct *start,
					const nodemask_t *mask)
{
	struct task_struct *tsk;
	bool ret = false;

	rcu_read_lock();
	for_each_thread(start, tsk) {
		if (mask) {
			/*
			 * If this is a mempolicy constrained oom, tsk's
			 * cpuset is irrelevant.  Only return true if its
			 * mempolicy intersects current, otherwise it may be
			 * needlessly killed.
			 */
			ret = mempolicy_nodemask_intersects(tsk, mask);
		} else {
			/*
			 * This is not a mempolicy constrained oom, so only
			 * check the mems of tsk's cpuset.
			 */
			ret = cpuset_mems_allowed_intersects(current, tsk);
		}
		if (ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}
#else
static bool has_intersects_mems_allowed(struct task_struct *tsk,
					const nodemask_t *mask)
{
	return true;
}
#endif /* CONFIG_NUMA */

/*
 * The process p may have detached its own ->mm while exiting or through
 * use_mm(), but one or more of its subthreads may still have a valid
 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t;

	rcu_read_lock();

	for_each_thread(p, t) {
		task_lock(t);
		if (likely(t->mm))
			goto found;
		task_unlock(t);
	}
	t = NULL;
found:
	rcu_read_unlock();

	return t;
}

/*
 * order == -1 means the oom kill is required by sysrq; otherwise the order
 * is only used for display purposes.
 */
static inline bool is_sysrq_oom(struct oom_control *oc)
{
	return oc->order == -1;
}

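/* true if the oom was triggered by a memcg limit rather than a global shortage */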
static inline bool is_memcg_oom(struct oom_control *oc)
{
	return oc->memcg != NULL;
}

/* return true if the task is not suitable as a candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p,
		struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
	if (is_global_init(p))
		return true;
	if (p->flags & PF_KTHREAD)
		return true;

	/* When called from mem_cgroup_out_of_memory() and p is not a member of the group */
	if (memcg && !task_in_mem_cgroup(p, memcg))
		return true;

	/* p may not have freeable memory in nodemask */
	if (!has_intersects_mems_allowed(p, nodemask))
		return true;

	return false;
}

/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of the task whose badness we should calculate
 * @memcg: task's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible.  The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
			  const nodemask_t *nodemask, unsigned long totalpages)
{
	long points;
	long adj;

	if (oom_unkillable_task(p, memcg, nodemask))
		return 0;

	p = find_lock_task_mm(p);
	if (!p)
		return 0;

	/*
	 * Do not even consider tasks which are explicitly marked oom
	 * unkillable, have already been oom reaped, or are in the middle
	 * of a vfork.
	 */
	adj = (long)p->signal->oom_score_adj;
	if (adj == OOM_SCORE_ADJ_MIN ||
			test_bit(MMF_OOM_REAPED, &p->mm->flags) ||
			in_vfork(p)) {
		task_unlock(p);
		return 0;
	}

	/*
	 * The baseline for the badness score is the proportion of RAM that each
	 * task's rss, pagetable and swap space use.
	 */
	points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
		atomic_long_read(&p->mm->nr_ptes) + mm_nr_pmds(p->mm);
	task_unlock(p);

	/*
	 * Root processes get 3% bonus, just like the __vm_enough_memory()
	 * implementation used by LSMs.
	 */
	if (has_capability_noaudit(p, CAP_SYS_ADMIN))
		points -= (points * 3) / 100;

	/* Normalize to oom_score_adj units */
	adj *= totalpages / 1000;
	points += adj;

	/*
	 * Never return 0 for an eligible task regardless of the root bonus and
	 * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
	 */
	return points > 0 ? points : 1;
}

enum oom_constraint {
	CONSTRAINT_NONE,
	CONSTRAINT_CPUSET,
	CONSTRAINT_MEMORY_POLICY,
	CONSTRAINT_MEMCG,
};

/*
 * Determine the type of allocation constraint.
 */
static enum oom_constraint constrained_alloc(struct oom_control *oc)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask);
	bool cpuset_limited = false;
	int nid;

	if (is_memcg_oom(oc)) {
		oc->totalpages = mem_cgroup_get_limit(oc->memcg) ?: 1;
		return CONSTRAINT_MEMCG;
	}

	/* Default to all available memory */
	oc->totalpages = totalram_pages + total_swap_pages;

	if (!IS_ENABLED(CONFIG_NUMA))
		return CONSTRAINT_NONE;

	if (!oc->zonelist)
		return CONSTRAINT_NONE;
	/*
	 * We reach here only when __GFP_NOFAIL is used, so we should avoid
	 * killing current and have to fall back to a random task kill.
	 * Ideally this would be CONSTRAINT_THISNODE, but there is no way to
	 * handle it for now.
	 */
	if (oc->gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;

	/*
	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
	 * the page allocator means a mempolicy is in effect.  Cpuset policy
	 * is enforced in get_page_from_freelist().
	 */
	if (oc->nodemask &&
	    !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, *oc->nodemask)
			oc->totalpages += node_spanned_pages(nid);
		return CONSTRAINT_MEMORY_POLICY;
	}

	/* Check if this allocation failure is caused by the cpuset's mems restriction */
	for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
			high_zoneidx, oc->nodemask)
		if (!cpuset_zone_allowed(zone, oc->gfp_mask))
			cpuset_limited = true;

	if (cpuset_limited) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, cpuset_current_mems_allowed)
			oc->totalpages += node_spanned_pages(nid);
		return CONSTRAINT_CPUSET;
	}
	return CONSTRAINT_NONE;
}

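/*
 * Evaluate a single candidate task: skip unkillable tasks, prefer tasks that
 * have oom_task_origin set, and otherwise remember the task with the highest
 * oom_badness() score in oc->chosen.  Returns 1 to abort the whole scan when
 * another victim already has access to memory reserves and has not been
 * reaped yet.
 */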
static int oom_evaluate_task(struct task_struct *task, void *arg)
{
	struct oom_control *oc = arg;
	unsigned long points;

	if (oom_unkillable_task(task, NULL, oc->nodemask))
		goto next;

	/*
	 * This task already has access to memory reserves and is being killed.
	 * Don't allow any other task to have access to the reserves unless
	 * the task has MMF_OOM_REAPED because the chances that it would
	 * release any memory are quite low.
	 */
	if (!is_sysrq_oom(oc) && atomic_read(&task->signal->oom_victims)) {
		struct task_struct *p = find_lock_task_mm(task);
		bool reaped = false;

		if (p) {
			reaped = test_bit(MMF_OOM_REAPED, &p->mm->flags);
			task_unlock(p);
		}
		if (reaped)
			goto next;
		goto abort;
	}

	/*
	 * If task is allocating a lot of memory and has been marked to be
	 * killed first if it triggers an oom, then select it.
	 */
	if (oom_task_origin(task)) {
		points = ULONG_MAX;
		goto select;
	}

	points = oom_badness(task, NULL, oc->nodemask, oc->totalpages);
	if (!points || points < oc->chosen_points)
		goto next;

	/* Prefer thread group leaders for display purposes */
	if (points == oc->chosen_points && thread_group_leader(oc->chosen))
		goto next;
select:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	get_task_struct(task);
	oc->chosen = task;
	oc->chosen_points = points;
next:
	return 0;
abort:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	oc->chosen = (void *)-1UL;
	return 1;
}

/*
 * Simple selection loop. We choose the process with the highest number of
 * 'points'. In case the scan was aborted, oc->chosen is set to -1.
 */
static void select_bad_process(struct oom_control *oc)
{
	if (is_memcg_oom(oc))
		mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
	else {
		struct task_struct *p;

		rcu_read_lock();
		for_each_process(p)
			if (oom_evaluate_task(p, oc))
				break;
		rcu_read_unlock();
	}

	oc->chosen_points = oc->chosen_points * 1000 / oc->totalpages;
}

/**
 * dump_tasks - dump current memory state of all system tasks
 * @memcg: current's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 *
 * Dumps the current memory state of all eligible tasks.  Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss, nr_ptes,
 * swapents, oom_score_adj value, and name.
 */
static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
	struct task_struct *p;
	struct task_struct *task;

	pr_info("[ pid ]   uid  tgid total_vm      rss nr_ptes nr_pmds swapents oom_score_adj name\n");
	rcu_read_lock();
	for_each_process(p) {
		if (oom_unkillable_task(p, memcg, nodemask))
			continue;

		task = find_lock_task_mm(p);
		if (!task) {
			/*
			 * This is a kthread or all of p's threads have already
			 * detached their mm's.  There's no need to report
			 * them; they can't be oom killed anyway.
			 */
			continue;
		}

		pr_info("[%5d] %5d %5d %8lu %8lu %7ld %7ld %8lu         %5hd %s\n",
			task->pid, from_kuid(&init_user_ns, task_uid(task)),
			task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
			atomic_long_read(&task->mm->nr_ptes),
			mm_nr_pmds(task->mm),
			get_mm_counter(task->mm, MM_SWAPENTS),
			task->signal->oom_score_adj, task->comm);
		task_unlock(task);
	}
	rcu_read_unlock();
}

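/*
 * Print the header of an oom kill report: the allocation context of the
 * caller, a stack trace, the memory state (memcg or global) and, when
 * sysctl_oom_dump_tasks is set, the per-task memory table.
 */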
static void dump_header(struct oom_control *oc, struct task_struct *p)
{
	pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, oom_score_adj=%hd\n",
		current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order,
		current->signal->oom_score_adj);

	cpuset_print_current_mems_allowed();
	dump_stack();
	if (oc->memcg)
		mem_cgroup_print_oom_info(oc->memcg, p);
	else
		show_mem(SHOW_MEM_FILTER_NODES);
	if (sysctl_oom_dump_tasks)
		dump_tasks(oc->memcg, oc->nodemask);
}

/*
 * Number of OOM victims in flight
 */
static atomic_t oom_victims = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);

static bool oom_killer_disabled __read_mostly;

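/* Translate a page count into kilobytes for the messages printed below. */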
#define K(x) ((x) << (PAGE_SHIFT-10))

/*
 * task->mm can be NULL if the task is the exited group leader.  So to
 * determine whether the task is using a particular mm, we examine all the
 * task's threads: if one of those is using this mm then this task was also
 * using it.
 */
bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		struct mm_struct *t_mm = READ_ONCE(t->mm);
		if (t_mm)
			return t_mm == mm;
	}
	return false;
}


#ifdef CONFIG_MMU
/*
 * OOM Reaper kernel thread which tries to reap the memory used by the OOM
 * victim (if that is possible) to help the OOM killer to move on.
 */
static struct task_struct *oom_reaper_th;
static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
static struct task_struct *oom_reaper_list;
static DEFINE_SPINLOCK(oom_reaper_lock);

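/*
 * Try to tear down the anonymous, non-shared mappings of the victim's mm so
 * that its memory is released even if the victim itself cannot make progress.
 * Returns false if mmap_sem could not be taken and the attempt should be
 * retried.
 */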
static bool __oom_reap_task(struct task_struct *tsk)
{
	struct mmu_gather tlb;
	struct vm_area_struct *vma;
	struct mm_struct *mm = NULL;
	struct task_struct *p;
	struct zap_details details = {.check_swap_entries = true,
				      .ignore_dirty = true};
	bool ret = true;

	/*
	 * We have to make sure to not race with the victim exit path
	 * and cause premature new oom victim selection:
	 * __oom_reap_task		exit_mm
	 *   mmget_not_zero
	 *				  mmput
	 *				    atomic_dec_and_test
	 *				  exit_oom_victim
	 *				[...]
	 *				out_of_memory
	 *				  select_bad_process
	 *				    # no TIF_MEMDIE task selects new victim
	 *  unmap_page_range # frees some memory
	 */
	mutex_lock(&oom_lock);

	/*
	 * Make sure we find the associated mm_struct even when the particular
	 * thread has already terminated and cleared its mm.
	 * We might race with the exit path, so consider our work done if there
	 * is no mm.
	 */
	p = find_lock_task_mm(tsk);
	if (!p)
		goto unlock_oom;
	mm = p->mm;
	atomic_inc(&mm->mm_count);
	task_unlock(p);

	if (!down_read_trylock(&mm->mmap_sem)) {
		ret = false;
		goto mm_drop;
	}

	/*
	 * Increase mm_users only after we know we will reap something so
	 * that mmput_async() is called only when we have reaped something
	 * and a delayed __mmput() doesn't matter that much.
	 */
	if (!mmget_not_zero(mm)) {
		up_read(&mm->mmap_sem);
		goto mm_drop;
	}

	tlb_gather_mmu(&tlb, mm, 0, -1);
	for (vma = mm->mmap ; vma; vma = vma->vm_next) {
		if (is_vm_hugetlb_page(vma))
			continue;

		/*
		 * mlocked VMAs require explicit munlocking before unmap.
		 * Let's keep it simple here and skip such VMAs.
		 */
		if (vma->vm_flags & VM_LOCKED)
			continue;

		/*
		 * Only anonymous pages have a good chance to be dropped
		 * without additional steps which we cannot afford as we
		 * are OOM already.
		 *
		 * We do not even care about fs backed pages because all
		 * which are reclaimable have already been reclaimed and
		 * we do not want to block exit_mmap by keeping mm ref
		 * count elevated without a good reason.
		 */
		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED))
			unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end,
					 &details);
	}
	tlb_finish_mmu(&tlb, 0, -1);
	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
			task_pid_nr(tsk), tsk->comm,
			K(get_mm_counter(mm, MM_ANONPAGES)),
			K(get_mm_counter(mm, MM_FILEPAGES)),
			K(get_mm_counter(mm, MM_SHMEMPAGES)));
	up_read(&mm->mmap_sem);

	/*
	 * This task can be safely ignored because we cannot do much more
	 * to release its memory.
	 */
	set_bit(MMF_OOM_REAPED, &mm->flags);
	/*
	 * Drop our reference but make sure the mmput slow path is called from a
	 * different context, because we shouldn't risk getting stuck there and
	 * putting the oom_reaper out of the way.
	 */
	mmput_async(mm);
mm_drop:
	mmdrop(mm);
unlock_oom:
	mutex_unlock(&oom_lock);
	return ret;
}

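/*
 * Retry __oom_reap_task() up to MAX_OOM_REAP_RETRIES times; after repeated
 * failures the mm is eventually marked MMF_OOM_REAPED so the OOM killer can
 * move on.  In all cases the victim's TIF_MEMDIE is cleared via
 * exit_oom_victim() and the reference taken by wake_oom_reaper() is dropped.
 */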
#define MAX_OOM_REAP_RETRIES 10
static void oom_reap_task(struct task_struct *tsk)
{
	int attempts = 0;

	/* Retry the down_read_trylock(mmap_sem) a few times */
	while (attempts++ < MAX_OOM_REAP_RETRIES && !__oom_reap_task(tsk))
		schedule_timeout_idle(HZ/10);

	if (attempts > MAX_OOM_REAP_RETRIES) {
		struct task_struct *p;

		pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
				task_pid_nr(tsk), tsk->comm);

		/*
		 * If we've already tried to reap this task in the past and
		 * failed, it probably doesn't make much sense to try yet
		 * again, so hide the mm from the oom killer so that it can
		 * move on to another task with a different mm struct.
		 */
		p = find_lock_task_mm(tsk);
		if (p) {
			if (test_and_set_bit(MMF_OOM_NOT_REAPABLE, &p->mm->flags)) {
				pr_info("oom_reaper: giving up pid:%d (%s)\n",
						task_pid_nr(tsk), tsk->comm);
				set_bit(MMF_OOM_REAPED, &p->mm->flags);
			}
			task_unlock(p);
		}

		debug_show_all_locks();
	}

	/*
	 * Clear TIF_MEMDIE because the task shouldn't be sitting on
	 * reasonably reclaimable memory anymore, or it is not a good oom
	 * victim candidate right now because its memory can be released
	 * neither by the task itself nor by the oom reaper.
	 */
	tsk->oom_reaper_list = NULL;
	exit_oom_victim(tsk);

	/* Drop a reference taken by wake_oom_reaper */
	put_task_struct(tsk);
}

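/* Main loop of the oom_reaper kthread: wait for queued victims and reap them one by one. */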
static int oom_reaper(void *unused)
{
	set_freezable();

	while (true) {
		struct task_struct *tsk = NULL;

		wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
		spin_lock(&oom_reaper_lock);
		if (oom_reaper_list != NULL) {
			tsk = oom_reaper_list;
			oom_reaper_list = tsk->oom_reaper_list;
		}
		spin_unlock(&oom_reaper_lock);

		if (tsk)
			oom_reap_task(tsk);
	}

	return 0;
}

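/*
 * Queue a victim on oom_reaper_list and wake the oom_reaper kthread.  Takes a
 * reference on tsk; it is dropped in oom_reap_task().
 */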
static void wake_oom_reaper(struct task_struct *tsk)
{
	if (!oom_reaper_th)
		return;

	/* tsk is already queued? */
	if (tsk == oom_reaper_list || tsk->oom_reaper_list)
		return;

	get_task_struct(tsk);

	spin_lock(&oom_reaper_lock);
	tsk->oom_reaper_list = oom_reaper_list;
	oom_reaper_list = tsk;
	spin_unlock(&oom_reaper_lock);
	wake_up(&oom_reaper_wait);
}

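/* Start the oom_reaper kernel thread at boot; the OOM killer still works without it. */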
static int __init oom_init(void)
{
	oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
	if (IS_ERR(oom_reaper_th)) {
		pr_err("Unable to start OOM reaper %ld. Continuing regardless\n",
				PTR_ERR(oom_reaper_th));
		oom_reaper_th = NULL;
	}
	return 0;
}
subsys_initcall(oom_init)
#else
static inline void wake_oom_reaper(struct task_struct *tsk)
{
}
#endif /* CONFIG_MMU */

/**
 * mark_oom_victim - mark the given task as OOM victim
 * @tsk: task to mark
 *
 * Has to be called with oom_lock held and never after
 * oom has already been disabled.
 */
static void mark_oom_victim(struct task_struct *tsk)
{
	WARN_ON(oom_killer_disabled);
	/* OOM killer might race with memcg OOM */
	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
		return;
	atomic_inc(&tsk->signal->oom_victims);
	/*
	 * Make sure that the task is woken up from uninterruptible sleep
	 * if it is frozen, because the OOM killer wouldn't be able to free
	 * any memory and would livelock. freezing_slow_path will tell the
	 * freezer that TIF_MEMDIE tasks should be ignored.
	 */
	__thaw_task(tsk);
	atomic_inc(&oom_victims);
}

/**
 * exit_oom_victim - note the exit of an OOM victim
 */
void exit_oom_victim(struct task_struct *tsk)
{
	if (!test_and_clear_tsk_thread_flag(tsk, TIF_MEMDIE))
		return;
	atomic_dec(&tsk->signal->oom_victims);

	if (!atomic_dec_return(&oom_victims))
		wake_up_all(&oom_victims_wait);
}

/**
 * oom_killer_disable - disable OOM killer
 *
 * Forces all page allocations to fail rather than trigger the OOM killer.
 * Will block and wait until all OOM victims are killed.
 *
 * The function cannot be called when there are runnable user tasks because
 * userspace would see unexpected allocation failures as a result. Any new
 * usage of this function should be discussed with the MM people.
 *
 * Returns true if successful and false if the OOM killer cannot be
 * disabled.
 */
bool oom_killer_disable(void)
{
	/*
	 * Make sure to not race with an ongoing OOM killer. Check that
	 * current is not killed (possibly due to sharing the victim's memory).
	 */
	if (mutex_lock_killable(&oom_lock))
		return false;
	oom_killer_disabled = true;
	mutex_unlock(&oom_lock);

	wait_event(oom_victims_wait, !atomic_read(&oom_victims));

	return true;
}

/**
 * oom_killer_enable - enable OOM killer
 */
void oom_killer_enable(void)
{
	oom_killer_disabled = false;
}

static inline bool __task_will_free_mem(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;

	/*
	 * A coredumping process may sleep for an extended period in exit_mm(),
	 * so the oom killer cannot assume that the process will promptly exit
	 * and release memory.
	 */
	if (sig->flags & SIGNAL_GROUP_COREDUMP)
		return false;

	if (sig->flags & SIGNAL_GROUP_EXIT)
		return true;

	if (thread_group_empty(task) && (task->flags & PF_EXITING))
		return true;

	return false;
}

/*
 * Checks whether the given task is dying or exiting and likely to
 * release its address space. This means that all threads and processes
 * sharing the same mm have to be killed or exiting.
 * The caller has to make sure that task->mm is stable (hold task_lock or
 * operate on current).
 */
static bool task_will_free_mem(struct task_struct *task)
{
	struct mm_struct *mm = task->mm;
	struct task_struct *p;
	bool ret = true;

	/*
	 * Skip tasks without an mm because they might have already passed
	 * exit_mm() and exit_oom_victim(). The oom_reaper could have rescued
	 * that, but do not rely on it for now. We can consider using
	 * find_lock_task_mm() in the future.
	 */
	if (!mm)
		return false;

	if (!__task_will_free_mem(task))
		return false;

	/*
	 * This task has already been drained by the oom reaper so there is
	 * only a small chance that it will free any more.
	 */
	if (test_bit(MMF_OOM_REAPED, &mm->flags))
		return false;

	if (atomic_read(&mm->mm_users) <= 1)
		return true;

	/*
	 * Make sure that all tasks which share the mm with the given task
	 * are dying as well to make sure that a) nobody pins its mm and
	 * b) the task is also reapable by the oom reaper.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(task, p))
			continue;
		ret = __task_will_free_mem(p);
		if (!ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}

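/*
 * Kill the selected victim (oc->chosen).  If any of its children with a
 * separate mm is eligible, the one with the highest oom_badness() score is
 * sacrificed instead.  All other processes sharing the victim's mm are sent
 * SIGKILL as well, and the oom reaper is woken unless the mm is pinned by an
 * unkillable user.
 */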
static void oom_kill_process(struct oom_control *oc, const char *message)
{
	struct task_struct *p = oc->chosen;
	unsigned int points = oc->chosen_points;
	struct task_struct *victim = p;
	struct task_struct *child;
	struct task_struct *t;
	struct mm_struct *mm;
	unsigned int victim_points = 0;
	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
					      DEFAULT_RATELIMIT_BURST);
	bool can_oom_reap = true;

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just set TIF_MEMDIE so it can die quickly.
	 */
	task_lock(p);
	if (task_will_free_mem(p)) {
		mark_oom_victim(p);
		wake_oom_reaper(p);
		task_unlock(p);
		put_task_struct(p);
		return;
	}
	task_unlock(p);

	if (__ratelimit(&oom_rs))
		dump_header(oc, p);

	pr_err("%s: Kill process %d (%s) score %u or sacrifice child\n",
		message, task_pid_nr(p), p->comm, points);

	/*
	 * If any of p's children has a different mm and is eligible for kill,
	 * the one with the highest oom_badness() score is sacrificed for its
	 * parent.  This attempts to lose the minimal amount of work done while
	 * still freeing memory.
	 */
	read_lock(&tasklist_lock);
	for_each_thread(p, t) {
		list_for_each_entry(child, &t->children, sibling) {
			unsigned int child_points;

			if (process_shares_mm(child, p->mm))
				continue;
			/*
			 * oom_badness() returns 0 if the thread is unkillable
			 */
			child_points = oom_badness(child,
				oc->memcg, oc->nodemask, oc->totalpages);
			if (child_points > victim_points) {
				put_task_struct(victim);
				victim = child;
				victim_points = child_points;
				get_task_struct(victim);
			}
		}
	}
	read_unlock(&tasklist_lock);

	p = find_lock_task_mm(victim);
	if (!p) {
		put_task_struct(victim);
		return;
	} else if (victim != p) {
		get_task_struct(p);
		put_task_struct(victim);
		victim = p;
	}

	/* Get a reference to safely compare mm after task_unlock(victim) */
	mm = victim->mm;
	atomic_inc(&mm->mm_count);
	/*
	 * We should send SIGKILL before setting TIF_MEMDIE in order to prevent
	 * the OOM victim from depleting the memory reserves from the user
	 * space under its control.
	 */
	do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true);
	mark_oom_victim(victim);
	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
		task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
		K(get_mm_counter(victim->mm, MM_ANONPAGES)),
		K(get_mm_counter(victim->mm, MM_FILEPAGES)),
		K(get_mm_counter(victim->mm, MM_SHMEMPAGES)));
	task_unlock(victim);

	/*
	 * Kill all user processes sharing victim->mm in other thread groups, if
	 * any.  They don't get access to memory reserves, though, to avoid
	 * depletion of all memory.  This prevents mm->mmap_sem livelock when an
	 * oom killed thread cannot exit because it requires the semaphore and
	 * it's contended by another thread trying to allocate memory itself.
	 * That thread will now get access to memory reserves since it has a
	 * pending fatal signal.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(p, victim))
			continue;
		if (unlikely(p->flags & PF_KTHREAD) || is_global_init(p)) {
			/*
			 * We cannot use oom_reaper for the mm shared by this
			 * process because it wouldn't get killed and so the
			 * memory might still be used. Hide the mm from the oom
			 * killer to guarantee OOM forward progress.
			 */
			can_oom_reap = false;
			set_bit(MMF_OOM_REAPED, &mm->flags);
			pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
					task_pid_nr(victim), victim->comm,
					task_pid_nr(p), p->comm);
			continue;
		}
		do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true);
	}
	rcu_read_unlock();

	if (can_oom_reap)
		wake_oom_reaper(victim);

	mmdrop(mm);
	put_task_struct(victim);
}
#undef K

/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
static void check_panic_on_oom(struct oom_control *oc,
			       enum oom_constraint constraint)
{
	if (likely(!sysctl_panic_on_oom))
		return;
	if (sysctl_panic_on_oom != 2) {
		/*
		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
		 * does not panic for cpuset, mempolicy, or memcg allocation
		 * failures.
		 */
		if (constraint != CONSTRAINT_NONE)
			return;
	}
	/* Do not panic for oom kills triggered by sysrq */
	if (is_sysrq_oom(oc))
		return;
	dump_header(oc, NULL);
	panic("Out of memory: %s panic_on_oom is enabled\n",
		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}

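/*
 * OOM notifier chain: registered callbacks get a chance to free memory from
 * out_of_memory() before a victim is selected; they report the number of
 * pages freed through the notifier argument.
 */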
static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);

/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @oc: pointer to struct oom_control
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR trying to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
bool out_of_memory(struct oom_control *oc)
{
	unsigned long freed = 0;
	enum oom_constraint constraint = CONSTRAINT_NONE;

	if (oom_killer_disabled)
		return false;

	if (!is_memcg_oom(oc)) {
		blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
		if (freed > 0)
			/* Got some memory back in the last second. */
			return true;
	}

	/*
	 * If current has a pending SIGKILL or is exiting, then automatically
	 * select it.  The goal is to allow it to allocate so that it may
	 * quickly exit and free its memory.
	 */
	if (task_will_free_mem(current)) {
		mark_oom_victim(current);
		wake_oom_reaper(current);
		return true;
	}

	/*
	 * The OOM killer does not compensate for IO-less reclaim.
	 * pagefault_out_of_memory() lost its gfp context so we have to
	 * make sure to exclude a 0 mask - all other users should have at
	 * least ___GFP_DIRECT_RECLAIM to get here.
	 */
	if (oc->gfp_mask && !(oc->gfp_mask & (__GFP_FS|__GFP_NOFAIL)))
		return true;

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA and memcg) that may require different handling.
	 */
	constraint = constrained_alloc(oc);
	if (constraint != CONSTRAINT_MEMORY_POLICY)
		oc->nodemask = NULL;
	check_panic_on_oom(oc, constraint);

	if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
	    current->mm && !oom_unkillable_task(current, NULL, oc->nodemask) &&
	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
		get_task_struct(current);
		oc->chosen = current;
		oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)");
		return true;
	}

	select_bad_process(oc);
	/* Found nothing?!?! Either we hang forever, or we panic. */
	if (!oc->chosen && !is_sysrq_oom(oc) && !is_memcg_oom(oc)) {
		dump_header(oc, NULL);
		panic("Out of memory and no killable processes...\n");
	}
	if (oc->chosen && oc->chosen != (void *)-1UL) {
		oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
				 "Memory cgroup out of memory");
		/*
		 * Give the killed process a good chance to exit before trying
		 * to allocate memory again.
		 */
		schedule_timeout_killable(1);
	}
	return !!oc->chosen;
}

/*
 * The pagefault handler calls here because it is out of memory, so kill a
 * memory-hogging task. If oom_lock is held by somebody else, a parallel oom
 * killing is already in progress so do nothing.
 */
void pagefault_out_of_memory(void)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = NULL,
		.gfp_mask = 0,
		.order = 0,
	};

	if (mem_cgroup_oom_synchronize(true))
		return;

	if (!mutex_trylock(&oom_lock))
		return;

	if (!out_of_memory(&oc)) {
		/*
		 * There shouldn't be any user tasks runnable while the
		 * OOM killer is disabled, so the current task has to
		 * be a racing OOM victim which oom_killer_disable()
		 * is waiting for.
		 */
		WARN_ON(test_thread_flag(TIF_MEMDIE));
	}

	mutex_unlock(&oom_lock);
}