/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *  Copyright (C)  2010  Google, Inc.
 *	Rewritten by David Rientjes
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers. It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/init.h>

#include <asm/tlb.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>

int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;
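/*
 * Note: these knobs are exposed via sysctl as vm.panic_on_oom,
 * vm.oom_kill_allocating_task and vm.oom_dump_tasks (wired up in
 * kernel/sysctl.c).
 */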

DEFINE_MUTEX(oom_lock);

#ifdef CONFIG_NUMA
/**
 * has_intersects_mems_allowed() - check task eligibility for kill
 * @start: task struct of which task to consider
 * @mask: nodemask passed to page allocator for mempolicy ooms
 *
 * Task eligibility is determined by whether or not a candidate task, @tsk,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 */
static bool has_intersects_mems_allowed(struct task_struct *start,
					const nodemask_t *mask)
{
	struct task_struct *tsk;
	bool ret = false;

	rcu_read_lock();
	for_each_thread(start, tsk) {
		if (mask) {
			/*
			 * If this is a mempolicy constrained oom, tsk's
			 * cpuset is irrelevant.  Only return true if its
			 * mempolicy intersects current, otherwise it may be
			 * needlessly killed.
			 */
			ret = mempolicy_nodemask_intersects(tsk, mask);
		} else {
			/*
			 * This is not a mempolicy constrained oom, so only
			 * check the mems of tsk's cpuset.
			 */
			ret = cpuset_mems_allowed_intersects(current, tsk);
		}
		if (ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}
#else
static bool has_intersects_mems_allowed(struct task_struct *tsk,
					const nodemask_t *mask)
{
	return true;
}
#endif /* CONFIG_NUMA */

/*
 * The process p may have detached its own ->mm while exiting or through
 * use_mm(), but one or more of its subthreads may still have a valid
 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t;

	rcu_read_lock();

	for_each_thread(p, t) {
		task_lock(t);
		if (likely(t->mm))
			goto found;
		task_unlock(t);
	}
	t = NULL;
found:
	rcu_read_unlock();

	return t;
}

/*
 * order == -1 means the oom kill is required by sysrq, otherwise only
 * for display purposes.
 */
static inline bool is_sysrq_oom(struct oom_control *oc)
{
	return oc->order == -1;
}

static inline bool is_memcg_oom(struct oom_control *oc)
{
	return oc->memcg != NULL;
}

/* return true if the task is not adequate as a candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p,
		struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
	if (is_global_init(p))
		return true;
	if (p->flags & PF_KTHREAD)
		return true;

	/* When mem_cgroup_out_of_memory() is called and p is not a member of the group */
	if (memcg && !task_in_mem_cgroup(p, memcg))
		return true;

	/* p may not have freeable memory in nodemask */
	if (!has_intersects_mems_allowed(p, nodemask))
		return true;

	return false;
}

/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of the task whose badness we should calculate
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible.  The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
			  const nodemask_t *nodemask, unsigned long totalpages)
{
	long points;
	long adj;

	if (oom_unkillable_task(p, memcg, nodemask))
		return 0;

	p = find_lock_task_mm(p);
	if (!p)
		return 0;

	/*
	 * Do not even consider tasks which are explicitly marked oom
	 * unkillable, have already been oom reaped, or are in
	 * the middle of vfork.
	 */
	adj = (long)p->signal->oom_score_adj;
	if (adj == OOM_SCORE_ADJ_MIN ||
			test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
			in_vfork(p)) {
		task_unlock(p);
		return 0;
	}

	/*
	 * The baseline for the badness score is the proportion of RAM that each
	 * task's rss, pagetable and swap space use.
	 */
	points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
		atomic_long_read(&p->mm->nr_ptes) + mm_nr_pmds(p->mm);
	task_unlock(p);

	/*
	 * Root processes get 3% bonus, just like the __vm_enough_memory()
	 * implementation used by LSMs.
	 */
	if (has_capability_noaudit(p, CAP_SYS_ADMIN))
		points -= (points * 3) / 100;

	/* Normalize to oom_score_adj units */
	adj *= totalpages / 1000;
	points += adj;
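	/*
	 * Worked example (hypothetical numbers, for illustration only): with
	 * totalpages = 1000000, a task whose rss, page tables and swap sum
	 * to 200000 pages starts with points = 200000.  An oom_score_adj of
	 * 500 then adds 500 * (1000000 / 1000) = 500000, giving 700000, so
	 * oom_score_adj biases the score by roughly adj/1000 of total memory.
	 */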

	/*
	 * Never return 0 for an eligible task regardless of the root bonus and
	 * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
	 */
	return points > 0 ? points : 1;
}
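/*
 * Side note (an assumption about the wider tree, not verified here):
 * /proc/<pid>/oom_score in fs/proc/base.c reports this value rescaled
 * against totalpages into the 0..1000 range that userspace tools expect.
 */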

enum oom_constraint {
	CONSTRAINT_NONE,
	CONSTRAINT_CPUSET,
	CONSTRAINT_MEMORY_POLICY,
	CONSTRAINT_MEMCG,
};

/*
 * Determine the type of allocation constraint.
 */
static enum oom_constraint constrained_alloc(struct oom_control *oc)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask);
	bool cpuset_limited = false;
	int nid;

	if (is_memcg_oom(oc)) {
		oc->totalpages = mem_cgroup_get_limit(oc->memcg) ?: 1;
		return CONSTRAINT_MEMCG;
	}

	/* Default to all available memory */
	oc->totalpages = totalram_pages + total_swap_pages;

	if (!IS_ENABLED(CONFIG_NUMA))
		return CONSTRAINT_NONE;

	if (!oc->zonelist)
		return CONSTRAINT_NONE;
	/*
	 * We reach here only when __GFP_NOFAIL is used, so we should avoid
	 * killing current; we have to do a random task kill in this case.
	 * Ideally this would be CONSTRAINT_THISNODE, but there is no way to
	 * handle that for now.
	 */
	if (oc->gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;

	/*
	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
	 * the page allocator means a mempolicy is in effect.  Cpuset policy
	 * is enforced in get_page_from_freelist().
	 */
	if (oc->nodemask &&
	    !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, *oc->nodemask)
			oc->totalpages += node_spanned_pages(nid);
		return CONSTRAINT_MEMORY_POLICY;
	}

	/* Check whether this allocation failure is caused by cpuset's wall function */
	for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
			high_zoneidx, oc->nodemask)
		if (!cpuset_zone_allowed(zone, oc->gfp_mask))
			cpuset_limited = true;

	if (cpuset_limited) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, cpuset_current_mems_allowed)
			oc->totalpages += node_spanned_pages(nid);
		return CONSTRAINT_CPUSET;
	}
	return CONSTRAINT_NONE;
}

static int oom_evaluate_task(struct task_struct *task, void *arg)
{
	struct oom_control *oc = arg;
	unsigned long points;

	if (oom_unkillable_task(task, NULL, oc->nodemask))
		goto next;

	/*
	 * This task already has access to memory reserves and is being killed.
	 * Don't allow any other task to have access to the reserves unless
	 * the task has MMF_OOM_SKIP because chances that it would release
	 * any memory are quite low.
	 */
	if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) {
		if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags))
			goto next;
		goto abort;
	}

	/*
	 * If task is allocating a lot of memory and has been marked to be
	 * killed first if it triggers an oom, then select it.
	 */
	if (oom_task_origin(task)) {
		points = ULONG_MAX;
		goto select;
	}

	points = oom_badness(task, NULL, oc->nodemask, oc->totalpages);
	if (!points || points < oc->chosen_points)
		goto next;

	/* Prefer thread group leaders for display purposes */
	if (points == oc->chosen_points && thread_group_leader(oc->chosen))
		goto next;
select:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	get_task_struct(task);
	oc->chosen = task;
	oc->chosen_points = points;
next:
	return 0;
abort:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	oc->chosen = (void *)-1UL;
	return 1;
}

/*
 * Simple selection loop. We choose the process with the highest number of
 * 'points'. In case scan was aborted, oc->chosen is set to -1.
 */
static void select_bad_process(struct oom_control *oc)
{
	if (is_memcg_oom(oc))
		mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
	else {
		struct task_struct *p;

		rcu_read_lock();
		for_each_process(p)
			if (oom_evaluate_task(p, oc))
				break;
		rcu_read_unlock();
	}

	oc->chosen_points = oc->chosen_points * 1000 / oc->totalpages;
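	/*
	 * (Illustrative, hypothetical numbers): the scaling above maps the
	 * raw page-based score onto the 0..1000 oom_score range; a chosen
	 * task whose score equals half of totalpages reports roughly 500.
	 */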
}

/**
 * dump_tasks - dump current memory state of all system tasks
 * @memcg: current's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 *
 * Dumps the current memory state of all eligible tasks.  Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss, nr_ptes,
 * swapents, oom_score_adj value, and name.
 */
static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
	struct task_struct *p;
	struct task_struct *task;

	pr_info("[ pid ]   uid  tgid total_vm      rss nr_ptes nr_pmds swapents oom_score_adj name\n");
	rcu_read_lock();
	for_each_process(p) {
		if (oom_unkillable_task(p, memcg, nodemask))
			continue;

		task = find_lock_task_mm(p);
		if (!task) {
			/*
			 * This is a kthread or all of p's threads have already
			 * detached their mm's.  There's no need to report
			 * them; they can't be oom killed anyway.
			 */
			continue;
		}

		pr_info("[%5d] %5d %5d %8lu %8lu %7ld %7ld %8lu         %5hd %s\n",
			task->pid, from_kuid(&init_user_ns, task_uid(task)),
			task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
			atomic_long_read(&task->mm->nr_ptes),
			mm_nr_pmds(task->mm),
			get_mm_counter(task->mm, MM_SWAPENTS),
			task->signal->oom_score_adj, task->comm);
		task_unlock(task);
	}
	rcu_read_unlock();
}

static void dump_header(struct oom_control *oc, struct task_struct *p)
{
	nodemask_t *nm = (oc->nodemask) ? oc->nodemask : &cpuset_current_mems_allowed;

	pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), nodemask=%*pbl, order=%d, oom_score_adj=%hd\n",
		current->comm, oc->gfp_mask, &oc->gfp_mask,
		nodemask_pr_args(nm), oc->order,
		current->signal->oom_score_adj);
	if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
		pr_warn("COMPACTION is disabled!!!\n");

	cpuset_print_current_mems_allowed();
	dump_stack();
	if (oc->memcg)
		mem_cgroup_print_oom_info(oc->memcg, p);
	else
		show_mem(SHOW_MEM_FILTER_NODES);
	if (sysctl_oom_dump_tasks)
		dump_tasks(oc->memcg, oc->nodemask);
}

/*
 * Number of OOM victims in flight
 */
static atomic_t oom_victims = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);

static bool oom_killer_disabled __read_mostly;
#define K(x) ((x) << (PAGE_SHIFT-10))
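/*
 * Example (assuming 4 KiB pages, i.e. PAGE_SHIFT == 12): K(x) == x << 2,
 * so a counter of 256 pages is printed as 1024 kB by the messages below.
 */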

/*
 * task->mm can be NULL if the task is the exited group leader.  So to
 * determine whether the task is using a particular mm, we examine all the
 * task's threads: if one of those is using this mm then this task was also
 * using it.
 */
bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		struct mm_struct *t_mm = READ_ONCE(t->mm);
		if (t_mm)
			return t_mm == mm;
	}
	return false;
}


#ifdef CONFIG_MMU
/*
 * OOM Reaper kernel thread which tries to reap the memory used by the OOM
 * victim (if that is possible) to help the OOM killer to move on.
 */
static struct task_struct *oom_reaper_th;
static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
static struct task_struct *oom_reaper_list;
static DEFINE_SPINLOCK(oom_reaper_lock);

static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
{
	struct mmu_gather tlb;
	struct vm_area_struct *vma;
	struct zap_details details = {.check_swap_entries = true,
				      .ignore_dirty = true};
	bool ret = true;

	/*
	 * We have to make sure to not race with the victim exit path
	 * and cause premature new oom victim selection:
	 * __oom_reap_task_mm		exit_mm
	 *   mmget_not_zero
	 *				  mmput
	 *				    atomic_dec_and_test
	 *				  exit_oom_victim
	 *				[...]
	 *				out_of_memory
	 *				  select_bad_process
	 *				    # no TIF_MEMDIE task selects new victim
	 *  unmap_page_range # frees some memory
	 */
	mutex_lock(&oom_lock);

	if (!down_read_trylock(&mm->mmap_sem)) {
		ret = false;
		goto unlock_oom;
	}

	/*
	 * increase mm_users only after we know we will reap something so
	 * that the mmput_async is called only when we have reaped something
	 * and delayed __mmput doesn't matter that much
	 */
	if (!mmget_not_zero(mm)) {
		up_read(&mm->mmap_sem);
		goto unlock_oom;
	}

	/*
	 * Tell all users of get_user/copy_from_user etc... that the content
	 * is no longer stable. No barriers really needed because unmapping
	 * should imply barriers already and the reader would hit a page fault
	 * if it stumbled over a reaped memory.
	 */
	set_bit(MMF_UNSTABLE, &mm->flags);

	tlb_gather_mmu(&tlb, mm, 0, -1);
	for (vma = mm->mmap ; vma; vma = vma->vm_next) {
		if (is_vm_hugetlb_page(vma))
			continue;

		/*
		 * mlocked VMAs require explicit munlocking before unmap.
		 * Let's keep it simple here and skip such VMAs.
		 */
		if (vma->vm_flags & VM_LOCKED)
			continue;

		/*
		 * Only anonymous pages have a good chance to be dropped
		 * without additional steps which we cannot afford as we
		 * are OOM already.
		 *
		 * We do not even care about fs backed pages because all
		 * which are reclaimable have already been reclaimed and
		 * we do not want to block exit_mmap by keeping mm ref
		 * count elevated without a good reason.
		 */
		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED))
			unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end,
					 &details);
	}
	tlb_finish_mmu(&tlb, 0, -1);
	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
			task_pid_nr(tsk), tsk->comm,
			K(get_mm_counter(mm, MM_ANONPAGES)),
			K(get_mm_counter(mm, MM_FILEPAGES)),
			K(get_mm_counter(mm, MM_SHMEMPAGES)));
	up_read(&mm->mmap_sem);

	/*
	 * Drop our reference but make sure the mmput slow path is called from a
	 * different context because we shouldn't risk getting stuck there and
	 * putting the oom_reaper out of the way.
	 */
	mmput_async(mm);
unlock_oom:
	mutex_unlock(&oom_lock);
	return ret;
}

#define MAX_OOM_REAP_RETRIES 10
static void oom_reap_task(struct task_struct *tsk)
{
	int attempts = 0;
	struct mm_struct *mm = tsk->signal->oom_mm;

	/* Retry the down_read_trylock(mmap_sem) a few times */
	while (attempts++ < MAX_OOM_REAP_RETRIES && !__oom_reap_task_mm(tsk, mm))
		schedule_timeout_idle(HZ/10);

	if (attempts <= MAX_OOM_REAP_RETRIES)
		goto done;

	pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
		task_pid_nr(tsk), tsk->comm);
	debug_show_all_locks();

done:
	tsk->oom_reaper_list = NULL;

	/*
	 * Hide this mm from OOM killer because it has been either reaped or
	 * somebody can't call up_write(mmap_sem).
	 */
	set_bit(MMF_OOM_SKIP, &mm->flags);

	/* Drop a reference taken by wake_oom_reaper */
	put_task_struct(tsk);
}

static int oom_reaper(void *unused)
{
	while (true) {
		struct task_struct *tsk = NULL;

		wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
		spin_lock(&oom_reaper_lock);
		if (oom_reaper_list != NULL) {
			tsk = oom_reaper_list;
			oom_reaper_list = tsk->oom_reaper_list;
		}
		spin_unlock(&oom_reaper_lock);

		if (tsk)
			oom_reap_task(tsk);
	}

	return 0;
}

static void wake_oom_reaper(struct task_struct *tsk)
{
	if (!oom_reaper_th)
		return;

	/* tsk is already queued? */
	if (tsk == oom_reaper_list || tsk->oom_reaper_list)
		return;

	get_task_struct(tsk);

	spin_lock(&oom_reaper_lock);
	tsk->oom_reaper_list = oom_reaper_list;
	oom_reaper_list = tsk;
	spin_unlock(&oom_reaper_lock);
	wake_up(&oom_reaper_wait);
}

static int __init oom_init(void)
{
	oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
	if (IS_ERR(oom_reaper_th)) {
		pr_err("Unable to start OOM reaper %ld. Continuing regardless\n",
				PTR_ERR(oom_reaper_th));
		oom_reaper_th = NULL;
	}
	return 0;
}
subsys_initcall(oom_init)
#else
static inline void wake_oom_reaper(struct task_struct *tsk)
{
}
#endif /* CONFIG_MMU */

/**
 * mark_oom_victim - mark the given task as OOM victim
 * @tsk: task to mark
 *
 * Has to be called with oom_lock held and never after
 * oom has been disabled already.
 *
 * tsk->mm has to be non NULL and caller has to guarantee it is stable (either
 * under task_lock or operate on the current).
 */
static void mark_oom_victim(struct task_struct *tsk)
{
	struct mm_struct *mm = tsk->mm;

	WARN_ON(oom_killer_disabled);
	/* OOM killer might race with memcg OOM */
	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
		return;

	/* oom_mm is bound to the signal struct life time. */
	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm))
		atomic_inc(&tsk->signal->oom_mm->mm_count);

	/*
	 * Make sure that the task is woken up from uninterruptible sleep
	 * if it is frozen because OOM killer wouldn't be able to free
	 * any memory and livelock. freezing_slow_path will tell the freezer
	 * that TIF_MEMDIE tasks should be ignored.
	 */
	__thaw_task(tsk);
	atomic_inc(&oom_victims);
}

/**
 * exit_oom_victim - note the exit of an OOM victim
 */
void exit_oom_victim(void)
{
	clear_thread_flag(TIF_MEMDIE);

	if (!atomic_dec_return(&oom_victims))
		wake_up_all(&oom_victims_wait);
}

/**
 * oom_killer_enable - enable OOM killer
 */
void oom_killer_enable(void)
{
	oom_killer_disabled = false;
}

/**
 * oom_killer_disable - disable OOM killer
 * @timeout: maximum timeout to wait for oom victims in jiffies
 *
 * Forces all page allocations to fail rather than trigger OOM killer.
 * Will block and wait until all OOM victims are killed or the given
 * timeout expires.
 *
 * The function cannot be called when there are runnable user tasks because
 * the userspace would see unexpected allocation failures as a result. Any
 * new usage of this function should be consulted with MM people.
 *
 * Returns true if successful and false if the OOM killer cannot be
 * disabled.
 */
bool oom_killer_disable(signed long timeout)
{
	signed long ret;

	/*
	 * Make sure to not race with an ongoing OOM killer. Check that the
	 * current is not killed (possibly due to sharing the victim's memory).
	 */
	if (mutex_lock_killable(&oom_lock))
		return false;
	oom_killer_disabled = true;
	mutex_unlock(&oom_lock);

	ret = wait_event_interruptible_timeout(oom_victims_wait,
			!atomic_read(&oom_victims), timeout);
	if (ret <= 0) {
		oom_killer_enable();
		return false;
	}

	return true;
}
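/*
 * Usage sketch (an assumption about callers, not part of this file): the
 * suspend/hibernation freeze path is the expected user, roughly:
 *
 *	if (!oom_killer_disable(msecs_to_jiffies(20 * MSEC_PER_SEC)))
 *		return -EBUSY;	// an OOM victim did not exit in time
 *	...
 *	oom_killer_enable();
 */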

static inline bool __task_will_free_mem(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;

	/*
	 * A coredumping process may sleep for an extended period in exit_mm(),
	 * so the oom killer cannot assume that the process will promptly exit
	 * and release memory.
	 */
	if (sig->flags & SIGNAL_GROUP_COREDUMP)
		return false;

	if (sig->flags & SIGNAL_GROUP_EXIT)
		return true;

	if (thread_group_empty(task) && (task->flags & PF_EXITING))
		return true;

	return false;
}

/*
 * Checks whether the given task is dying or exiting and likely to
 * release its address space. This means that all threads and processes
 * sharing the same mm have to be killed or exiting.
 * Caller has to make sure that task->mm is stable (hold task_lock or
 * it operates on the current).
 */
static bool task_will_free_mem(struct task_struct *task)
{
	struct mm_struct *mm = task->mm;
	struct task_struct *p;
	bool ret = true;

	/*
	 * Skip tasks without mm because it might have passed its exit_mm and
	 * exit_oom_victim. oom_reaper could have rescued that but do not rely
	 * on that for now. We can consider find_lock_task_mm in future.
	 */
	if (!mm)
		return false;

	if (!__task_will_free_mem(task))
		return false;

	/*
	 * This task has already been drained by the oom reaper so there are
	 * only small chances it will free some more
	 */
	if (test_bit(MMF_OOM_SKIP, &mm->flags))
		return false;

	if (atomic_read(&mm->mm_users) <= 1)
		return true;

	/*
	 * Make sure that all tasks which share the mm with the given task
	 * are dying as well to make sure that a) nobody pins its mm and
	 * b) the task is also reapable by the oom reaper.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(task, p))
			continue;
		ret = __task_will_free_mem(p);
		if (!ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}

static void oom_kill_process(struct oom_control *oc, const char *message)
{
	struct task_struct *p = oc->chosen;
	unsigned int points = oc->chosen_points;
	struct task_struct *victim = p;
	struct task_struct *child;
	struct task_struct *t;
	struct mm_struct *mm;
	unsigned int victim_points = 0;
	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
					      DEFAULT_RATELIMIT_BURST);
	bool can_oom_reap = true;

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just set TIF_MEMDIE so it can die quickly
	 */
	task_lock(p);
	if (task_will_free_mem(p)) {
		mark_oom_victim(p);
		wake_oom_reaper(p);
		task_unlock(p);
		put_task_struct(p);
		return;
	}
	task_unlock(p);

	if (__ratelimit(&oom_rs))
		dump_header(oc, p);

	pr_err("%s: Kill process %d (%s) score %u or sacrifice child\n",
		message, task_pid_nr(p), p->comm, points);

	/*
	 * If any of p's children has a different mm and is eligible for kill,
	 * the one with the highest oom_badness() score is sacrificed for its
	 * parent.  This attempts to lose the minimal amount of work done while
	 * still freeing memory.
	 */
	read_lock(&tasklist_lock);
	for_each_thread(p, t) {
		list_for_each_entry(child, &t->children, sibling) {
			unsigned int child_points;

			if (process_shares_mm(child, p->mm))
				continue;
			/*
			 * oom_badness() returns 0 if the thread is unkillable
			 */
			child_points = oom_badness(child,
				oc->memcg, oc->nodemask, oc->totalpages);
			if (child_points > victim_points) {
				put_task_struct(victim);
				victim = child;
				victim_points = child_points;
				get_task_struct(victim);
			}
		}
	}
	read_unlock(&tasklist_lock);

	p = find_lock_task_mm(victim);
	if (!p) {
		put_task_struct(victim);
		return;
	} else if (victim != p) {
		get_task_struct(p);
		put_task_struct(victim);
		victim = p;
	}

	/* Get a reference to safely compare mm after task_unlock(victim) */
	mm = victim->mm;
	atomic_inc(&mm->mm_count);
	/*
	 * We should send SIGKILL before setting TIF_MEMDIE in order to prevent
	 * the OOM victim from depleting the memory reserves from the user
	 * space under its control.
	 */
	do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true);
	mark_oom_victim(victim);
	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
		task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
		K(get_mm_counter(victim->mm, MM_ANONPAGES)),
		K(get_mm_counter(victim->mm, MM_FILEPAGES)),
		K(get_mm_counter(victim->mm, MM_SHMEMPAGES)));
	task_unlock(victim);

	/*
	 * Kill all user processes sharing victim->mm in other thread groups, if
	 * any.  They don't get access to memory reserves, though, to avoid
	 * depletion of all memory.  This prevents mm->mmap_sem livelock when an
	 * oom killed thread cannot exit because it requires the semaphore and
	 * it's contended by another thread trying to allocate memory itself.
	 * That thread will now get access to memory reserves since it has a
	 * pending fatal signal.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(p, victim))
			continue;
		if (is_global_init(p)) {
			can_oom_reap = false;
			set_bit(MMF_OOM_SKIP, &mm->flags);
			pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
					task_pid_nr(victim), victim->comm,
					task_pid_nr(p), p->comm);
			continue;
		}
		/*
		 * No use_mm() user needs to read from the userspace so we are
		 * ok to reap it.
		 */
		if (unlikely(p->flags & PF_KTHREAD))
			continue;
		do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true);
	}
	rcu_read_unlock();

	if (can_oom_reap)
		wake_oom_reaper(victim);

	mmdrop(mm);
	put_task_struct(victim);
}
#undef K

/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
static void check_panic_on_oom(struct oom_control *oc,
			       enum oom_constraint constraint)
{
	if (likely(!sysctl_panic_on_oom))
		return;
	if (sysctl_panic_on_oom != 2) {
		/*
		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
		 * does not panic for cpuset, mempolicy, or memcg allocation
		 * failures.
		 */
		if (constraint != CONSTRAINT_NONE)
			return;
	}
	/* Do not panic for oom kills triggered by sysrq */
	if (is_sysrq_oom(oc))
		return;
	dump_header(oc, NULL);
	panic("Out of memory: %s panic_on_oom is enabled\n",
		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}
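/*
 * Example (illustrative): with "sysctl vm.panic_on_oom=2" even cpuset,
 * mempolicy and memcg constrained OOMs panic the machine, while a value
 * of 1 panics only for global (CONSTRAINT_NONE) OOMs, as checked above.
 */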

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);

/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @oc: pointer to struct oom_control
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR try to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
bool out_of_memory(struct oom_control *oc)
{
	unsigned long freed = 0;
	enum oom_constraint constraint = CONSTRAINT_NONE;

	if (oom_killer_disabled)
		return false;

	if (!is_memcg_oom(oc)) {
		blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
		if (freed > 0)
			/* Got some memory back in the last second. */
			return true;
	}

	/*
	 * If current has a pending SIGKILL or is exiting, then automatically
	 * select it.  The goal is to allow it to allocate so that it may
	 * quickly exit and free its memory.
	 */
	if (task_will_free_mem(current)) {
		mark_oom_victim(current);
		wake_oom_reaper(current);
		return true;
	}

	/*
	 * The OOM killer does not compensate for IO-less reclaim.
	 * pagefault_out_of_memory lost its gfp context so we have to
	 * make sure exclude 0 mask - all other users should have at least
	 * ___GFP_DIRECT_RECLAIM to get here.
	 */
	if (oc->gfp_mask && !(oc->gfp_mask & (__GFP_FS|__GFP_NOFAIL)))
		return true;

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA and memcg) that may require different handling.
	 */
	constraint = constrained_alloc(oc);
	if (constraint != CONSTRAINT_MEMORY_POLICY)
		oc->nodemask = NULL;
	check_panic_on_oom(oc, constraint);

	if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
	    current->mm && !oom_unkillable_task(current, NULL, oc->nodemask) &&
	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
		get_task_struct(current);
		oc->chosen = current;
		oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)");
		return true;
	}

	select_bad_process(oc);
	/* Found nothing?!?! Either we hang forever, or we panic. */
	if (!oc->chosen && !is_sysrq_oom(oc) && !is_memcg_oom(oc)) {
		dump_header(oc, NULL);
		panic("Out of memory and no killable processes...\n");
	}
	if (oc->chosen && oc->chosen != (void *)-1UL) {
		oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
				 "Memory cgroup out of memory");
		/*
		 * Give the killed process a good chance to exit before trying
		 * to allocate memory again.
		 */
		schedule_timeout_killable(1);
	}
	return !!oc->chosen;
}

/*
 * The pagefault handler calls here because it is out of memory, so kill a
 * memory-hogging task. If oom_lock is held by somebody else, a parallel oom
 * killing is already in progress so do nothing.
 */
void pagefault_out_of_memory(void)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = NULL,
		.gfp_mask = 0,
		.order = 0,
	};

	if (mem_cgroup_oom_synchronize(true))
		return;

	if (!mutex_trylock(&oom_lock))
		return;
	out_of_memory(&oc);
	mutex_unlock(&oom_lock);
}