/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *  Copyright (C)  2010  Google, Inc.
 *	Rewritten by David Rientjes
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers. It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/init.h>

#include <asm/tlb.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>

int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;

DEFINE_MUTEX(oom_lock);

#ifdef CONFIG_NUMA
/**
 * has_intersects_mems_allowed() - check task eligibility for kill
 * @start: task struct of the task to consider
 * @mask: nodemask passed to page allocator for mempolicy ooms
 *
 * Task eligibility is determined by whether or not a candidate task, @tsk,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 */
static bool has_intersects_mems_allowed(struct task_struct *start,
					const nodemask_t *mask)
{
	struct task_struct *tsk;
	bool ret = false;

	rcu_read_lock();
	for_each_thread(start, tsk) {
		if (mask) {
			/*
			 * If this is a mempolicy constrained oom, tsk's
			 * cpuset is irrelevant.  Only return true if its
			 * mempolicy intersects current, otherwise it may be
			 * needlessly killed.
			 */
			ret = mempolicy_nodemask_intersects(tsk, mask);
		} else {
			/*
			 * This is not a mempolicy constrained oom, so only
			 * check the mems of tsk's cpuset.
			 */
			ret = cpuset_mems_allowed_intersects(current, tsk);
		}
		if (ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}
#else
static bool has_intersects_mems_allowed(struct task_struct *tsk,
					const nodemask_t *mask)
{
	return true;
}
#endif /* CONFIG_NUMA */

/*
 * The process p may have detached its own ->mm while exiting or through
 * use_mm(), but one or more of its subthreads may still have a valid
 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t;

	rcu_read_lock();

	for_each_thread(p, t) {
		task_lock(t);
		if (likely(t->mm))
			goto found;
		task_unlock(t);
	}
	t = NULL;
found:
	rcu_read_unlock();

	return t;
}
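
/*
 * Illustrative usage sketch (not part of the original file): the task
 * returned above is still locked, so a caller is expected to pair the
 * lookup with task_unlock():
 *
 *	struct task_struct *t = find_lock_task_mm(p);
 *	if (t) {
 *		... dereference t->mm safely here ...
 *		task_unlock(t);
 *	}
 */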

/*
 * order == -1 means the oom kill is required by sysrq, otherwise only
 * for display purposes.
 */
static inline bool is_sysrq_oom(struct oom_control *oc)
{
	return oc->order == -1;
}

static inline bool is_memcg_oom(struct oom_control *oc)
{
	return oc->memcg != NULL;
}

/* return true if the task is not adequate as candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p,
		struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
	if (is_global_init(p))
		return true;
	if (p->flags & PF_KTHREAD)
		return true;

	/* When mem_cgroup_out_of_memory() and p is not a member of the group */
	if (memcg && !task_in_mem_cgroup(p, memcg))
		return true;

	/* p may not have freeable memory in nodemask */
	if (!has_intersects_mems_allowed(p, nodemask))
		return true;

	return false;
}

/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of the task whose badness score we should calculate
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible.  The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
			  const nodemask_t *nodemask, unsigned long totalpages)
{
	long points;
	long adj;

	if (oom_unkillable_task(p, memcg, nodemask))
		return 0;

	p = find_lock_task_mm(p);
	if (!p)
		return 0;

	/*
	 * Do not even consider tasks which are explicitly marked oom
	 * unkillable or have already been oom reaped or are in
	 * the middle of vfork
	 */
	adj = (long)p->signal->oom_score_adj;
	if (adj == OOM_SCORE_ADJ_MIN ||
			test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
			in_vfork(p)) {
		task_unlock(p);
		return 0;
	}

	/*
	 * The baseline for the badness score is the proportion of RAM that each
	 * task's rss, pagetable and swap space use.
	 */
	points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
		atomic_long_read(&p->mm->nr_ptes) + mm_nr_pmds(p->mm);
	task_unlock(p);

	/*
	 * Root processes get 3% bonus, just like the __vm_enough_memory()
	 * implementation used by LSMs.
	 */
	if (has_capability_noaudit(p, CAP_SYS_ADMIN))
		points -= (points * 3) / 100;

	/* Normalize to oom_score_adj units */
	adj *= totalpages / 1000;
	points += adj;

	/*
	 * Never return 0 for an eligible task regardless of the root bonus and
	 * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
	 */
	return points > 0 ? points : 1;
}
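
/*
 * Worked example (illustrative numbers, not from the original source): with
 * totalpages = 1,000,000, a task whose rss + swapents + page-table pages sum
 * to 100,000 starts at points = 100000.  An oom_score_adj of +300 adds
 * 300 * (1000000 / 1000) = 300,000 for a final score of 400,000, while an
 * adj of -300 would drive the score negative and the task would return the
 * floor value of 1.
 */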

enum oom_constraint {
	CONSTRAINT_NONE,
	CONSTRAINT_CPUSET,
	CONSTRAINT_MEMORY_POLICY,
	CONSTRAINT_MEMCG,
};

/*
 * Determine the type of allocation constraint.
 */
static enum oom_constraint constrained_alloc(struct oom_control *oc)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask);
	bool cpuset_limited = false;
	int nid;

	if (is_memcg_oom(oc)) {
		oc->totalpages = mem_cgroup_get_limit(oc->memcg) ?: 1;
		return CONSTRAINT_MEMCG;
	}

	/* Default to all available memory */
	oc->totalpages = totalram_pages + total_swap_pages;

	if (!IS_ENABLED(CONFIG_NUMA))
		return CONSTRAINT_NONE;

	if (!oc->zonelist)
		return CONSTRAINT_NONE;
	/*
	 * We reach here only when __GFP_NOFAIL is used, so we should avoid
	 * killing current: we have to kill a random task in this case.
	 * Hopefully CONSTRAINT_THISNODE would fit, but there is no way to
	 * handle it, for now.
	 */
	if (oc->gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;

	/*
	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
	 * the page allocator means a mempolicy is in effect.  Cpuset policy
	 * is enforced in get_page_from_freelist().
	 */
	if (oc->nodemask &&
	    !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, *oc->nodemask)
			oc->totalpages += node_spanned_pages(nid);
		return CONSTRAINT_MEMORY_POLICY;
	}

	/* Check whether this allocation failure was caused by cpuset's wall function */
	for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
			high_zoneidx, oc->nodemask)
		if (!cpuset_zone_allowed(zone, oc->gfp_mask))
			cpuset_limited = true;

	if (cpuset_limited) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, cpuset_current_mems_allowed)
			oc->totalpages += node_spanned_pages(nid);
		return CONSTRAINT_CPUSET;
	}
	return CONSTRAINT_NONE;
}
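
/*
 * Illustrative example (hypothetical setup): for an oom constrained by a
 * mempolicy bound to node 1 only, totalpages becomes total_swap_pages plus
 * node_spanned_pages(1), so badness scores are normalized against the
 * memory actually usable by the allocation rather than all of RAM.
 */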

static int oom_evaluate_task(struct task_struct *task, void *arg)
{
	struct oom_control *oc = arg;
	unsigned long points;

	if (oom_unkillable_task(task, NULL, oc->nodemask))
		goto next;

	/*
	 * This task already has access to memory reserves and is being killed.
	 * Don't allow any other task to have access to the reserves unless
	 * the task has MMF_OOM_SKIP because chances that it would release
	 * any memory are quite low.
	 */
	if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) {
		if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags))
			goto next;
		goto abort;
	}

	/*
	 * If task is allocating a lot of memory and has been marked to be
	 * killed first if it triggers an oom, then select it.
	 */
	if (oom_task_origin(task)) {
		points = ULONG_MAX;
		goto select;
	}

	points = oom_badness(task, NULL, oc->nodemask, oc->totalpages);
	if (!points || points < oc->chosen_points)
		goto next;

	/* Prefer thread group leaders for display purposes */
	if (points == oc->chosen_points && thread_group_leader(oc->chosen))
		goto next;
select:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	get_task_struct(task);
	oc->chosen = task;
	oc->chosen_points = points;
next:
	return 0;
abort:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	oc->chosen = (void *)-1UL;
	return 1;
}
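
/*
 * Summary (comment added for clarity): the precedence above is 1) abort the
 * scan when an existing victim has not yet been oom reaped, 2) pick tasks
 * flagged by oom_task_origin() unconditionally (ULONG_MAX points), and
 * 3) otherwise keep the task with the highest oom_badness() score.
 */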

/*
 * Simple selection loop. We choose the process with the highest number of
 * 'points'. In case scan was aborted, oc->chosen is set to -1.
 */
static void select_bad_process(struct oom_control *oc)
{
	if (is_memcg_oom(oc))
		mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
	else {
		struct task_struct *p;

		rcu_read_lock();
		for_each_process(p)
			if (oom_evaluate_task(p, oc))
				break;
		rcu_read_unlock();
	}

	oc->chosen_points = oc->chosen_points * 1000 / oc->totalpages;
}
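
/*
 * Worked example (illustrative numbers): a raw chosen_points of 400,000 with
 * totalpages = 1,000,000 normalizes to 400000 * 1000 / 1000000 = 400 on the
 * familiar 0..1000 badness scale.
 */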

/**
 * dump_tasks - dump current memory state of all system tasks
 * @memcg: current's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 *
 * Dumps the current memory state of all eligible tasks.  Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss, nr_ptes,
 * swapents, oom_score_adj value, and name.
 */
static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
	struct task_struct *p;
	struct task_struct *task;

	pr_info("[ pid ]   uid  tgid total_vm      rss nr_ptes nr_pmds swapents oom_score_adj name\n");
	rcu_read_lock();
	for_each_process(p) {
		if (oom_unkillable_task(p, memcg, nodemask))
			continue;

		task = find_lock_task_mm(p);
		if (!task) {
			/*
			 * This is a kthread or all of p's threads have already
			 * detached their mm's.  There's no need to report
			 * them; they can't be oom killed anyway.
			 */
			continue;
		}

		pr_info("[%5d] %5d %5d %8lu %8lu %7ld %7ld %8lu         %5hd %s\n",
			task->pid, from_kuid(&init_user_ns, task_uid(task)),
			task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
			atomic_long_read(&task->mm->nr_ptes),
			mm_nr_pmds(task->mm),
			get_mm_counter(task->mm, MM_SWAPENTS),
			task->signal->oom_score_adj, task->comm);
		task_unlock(task);
	}
	rcu_read_unlock();
}

static void dump_header(struct oom_control *oc, struct task_struct *p)
{
	pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, oom_score_adj=%hd\n",
		current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order,
		current->signal->oom_score_adj);
	if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
		pr_warn("COMPACTION is disabled!!!\n");

	cpuset_print_current_mems_allowed();
	dump_stack();
	if (oc->memcg)
		mem_cgroup_print_oom_info(oc->memcg, p);
	else
		show_mem(SHOW_MEM_FILTER_NODES);
	if (sysctl_oom_dump_tasks)
		dump_tasks(oc->memcg, oc->nodemask);
}

/*
 * Number of OOM victims in flight
 */
static atomic_t oom_victims = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);

static bool oom_killer_disabled __read_mostly;

#define K(x) ((x) << (PAGE_SHIFT-10))
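
/*
 * Illustrative note: K() converts a page count to kilobytes.  With the
 * common PAGE_SHIFT of 12 (4 KiB pages) it is x << 2, so K(25600) prints
 * as 102400 kB, i.e. 100 MiB.
 */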

/*
 * task->mm can be NULL if the task is the exited group leader.  So to
 * determine whether the task is using a particular mm, we examine all the
 * task's threads: if one of those is using this mm then this task was also
 * using it.
 */
bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		struct mm_struct *t_mm = READ_ONCE(t->mm);
		if (t_mm)
			return t_mm == mm;
	}
	return false;
}


#ifdef CONFIG_MMU
/*
 * OOM Reaper kernel thread which tries to reap the memory used by the OOM
 * victim (if that is possible) to help the OOM killer to move on.
 */
static struct task_struct *oom_reaper_th;
static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
static struct task_struct *oom_reaper_list;
static DEFINE_SPINLOCK(oom_reaper_lock);

static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
{
	struct mmu_gather tlb;
	struct vm_area_struct *vma;
	struct zap_details details = {.check_swap_entries = true,
				      .ignore_dirty = true};
	bool ret = true;

	/*
	 * We have to make sure to not race with the victim exit path
	 * and cause premature new oom victim selection:
	 * __oom_reap_task_mm		exit_mm
	 *   mmget_not_zero
	 *				  mmput
	 *				    atomic_dec_and_test
	 *				  exit_oom_victim
	 *				[...]
	 *				out_of_memory
	 *				  select_bad_process
	 *				    # no TIF_MEMDIE task selects new victim
	 *  unmap_page_range # frees some memory
	 */
	mutex_lock(&oom_lock);

	if (!down_read_trylock(&mm->mmap_sem)) {
		ret = false;
		goto unlock_oom;
	}

	/*
	 * increase mm_users only after we know we will reap something so
	 * that the mmput_async is called only when we have reaped something
	 * and delayed __mmput doesn't matter that much
	 */
	if (!mmget_not_zero(mm)) {
		up_read(&mm->mmap_sem);
		goto unlock_oom;
	}

	/*
	 * Tell all users of get_user/copy_from_user etc... that the content
	 * is no longer stable. No barriers really needed because unmapping
	 * should imply barriers already and the reader would hit a page fault
	 * if it stumbled over a reaped memory.
	 */
	set_bit(MMF_UNSTABLE, &mm->flags);

	tlb_gather_mmu(&tlb, mm, 0, -1);
	for (vma = mm->mmap ; vma; vma = vma->vm_next) {
		if (is_vm_hugetlb_page(vma))
			continue;

		/*
		 * mlocked VMAs require explicit munlocking before unmap.
		 * Let's keep it simple here and skip such VMAs.
		 */
		if (vma->vm_flags & VM_LOCKED)
			continue;

		/*
		 * Only anonymous pages have a good chance to be dropped
		 * without additional steps which we cannot afford as we
		 * are OOM already.
		 *
		 * We do not even care about fs backed pages because all
		 * which are reclaimable have already been reclaimed and
		 * we do not want to block exit_mmap by keeping mm ref
		 * count elevated without a good reason.
		 */
		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED))
			unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end,
					 &details);
	}
	tlb_finish_mmu(&tlb, 0, -1);
	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
			task_pid_nr(tsk), tsk->comm,
			K(get_mm_counter(mm, MM_ANONPAGES)),
			K(get_mm_counter(mm, MM_FILEPAGES)),
			K(get_mm_counter(mm, MM_SHMEMPAGES)));
	up_read(&mm->mmap_sem);

	/*
	 * Drop our reference but make sure the mmput slow path is called from a
	 * different context because we shouldn't risk we get stuck there and
	 * put the oom_reaper out of the way.
	 */
	mmput_async(mm);
unlock_oom:
	mutex_unlock(&oom_lock);
	return ret;
}

#define MAX_OOM_REAP_RETRIES 10
static void oom_reap_task(struct task_struct *tsk)
{
	int attempts = 0;
	struct mm_struct *mm = tsk->signal->oom_mm;

	/* Retry the down_read_trylock(mmap_sem) a few times */
	while (attempts++ < MAX_OOM_REAP_RETRIES && !__oom_reap_task_mm(tsk, mm))
		schedule_timeout_idle(HZ/10);

	if (attempts <= MAX_OOM_REAP_RETRIES)
		goto done;

	pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
		task_pid_nr(tsk), tsk->comm);
	debug_show_all_locks();

done:
	tsk->oom_reaper_list = NULL;

	/*
	 * Hide this mm from OOM killer because it has been either reaped or
	 * somebody can't call up_write(mmap_sem).
	 */
	set_bit(MMF_OOM_SKIP, &mm->flags);

	/* Drop a reference taken by wake_oom_reaper */
	put_task_struct(tsk);
}
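
/*
 * Note (illustrative arithmetic): with MAX_OOM_REAP_RETRIES == 10 and
 * schedule_timeout_idle(HZ/10) between attempts, the reaper gives up on a
 * victim's mmap_sem after roughly one second.
 */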

static int oom_reaper(void *unused)
{
	while (true) {
		struct task_struct *tsk = NULL;

		wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
		spin_lock(&oom_reaper_lock);
		if (oom_reaper_list != NULL) {
			tsk = oom_reaper_list;
			oom_reaper_list = tsk->oom_reaper_list;
		}
		spin_unlock(&oom_reaper_lock);

		if (tsk)
			oom_reap_task(tsk);
	}

	return 0;
}

static void wake_oom_reaper(struct task_struct *tsk)
{
	if (!oom_reaper_th)
		return;

	/* tsk is already queued? */
	if (tsk == oom_reaper_list || tsk->oom_reaper_list)
		return;

	get_task_struct(tsk);

	spin_lock(&oom_reaper_lock);
	tsk->oom_reaper_list = oom_reaper_list;
	oom_reaper_list = tsk;
	spin_unlock(&oom_reaper_lock);
	wake_up(&oom_reaper_wait);
}

static int __init oom_init(void)
{
	oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
	if (IS_ERR(oom_reaper_th)) {
		pr_err("Unable to start OOM reaper %ld. Continuing regardless\n",
				PTR_ERR(oom_reaper_th));
		oom_reaper_th = NULL;
	}
	return 0;
}
subsys_initcall(oom_init)
#else
static inline void wake_oom_reaper(struct task_struct *tsk)
{
}
#endif /* CONFIG_MMU */

/**
 * mark_oom_victim - mark the given task as OOM victim
 * @tsk: task to mark
 *
 * Has to be called with oom_lock held and never after
 * oom has been disabled already.
 *
 * tsk->mm has to be non NULL and caller has to guarantee it is stable (either
 * under task_lock or operate on the current).
 */
static void mark_oom_victim(struct task_struct *tsk)
{
	struct mm_struct *mm = tsk->mm;

	WARN_ON(oom_killer_disabled);
	/* OOM killer might race with memcg OOM */
	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
		return;

	/* oom_mm is bound to the signal struct life time. */
	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm))
		atomic_inc(&tsk->signal->oom_mm->mm_count);

	/*
	 * Make sure that the task is woken up from uninterruptible sleep
	 * if it is frozen because OOM killer wouldn't be able to free
	 * any memory and livelock. freezing_slow_path will tell the freezer
	 * that TIF_MEMDIE tasks should be ignored.
	 */
	__thaw_task(tsk);
	atomic_inc(&oom_victims);
}

/**
 * exit_oom_victim - note the exit of an OOM victim
 */
void exit_oom_victim(void)
{
	clear_thread_flag(TIF_MEMDIE);

	if (!atomic_dec_return(&oom_victims))
		wake_up_all(&oom_victims_wait);
}

/**
 * oom_killer_enable - enable OOM killer
 */
void oom_killer_enable(void)
{
	oom_killer_disabled = false;
}

/**
 * oom_killer_disable - disable OOM killer
 * @timeout: maximum timeout to wait for oom victims in jiffies
 *
 * Forces all page allocations to fail rather than trigger OOM killer.
 * Will block and wait until all OOM victims are killed or the given
 * timeout expires.
 *
 * The function cannot be called when there are runnable user tasks because
 * the userspace would see unexpected allocation failures as a result. Any
 * new usage of this function should be consulted with MM people.
 *
 * Returns true if successful and false if the OOM killer cannot be
 * disabled.
 */
bool oom_killer_disable(signed long timeout)
{
	signed long ret;

	/*
	 * Make sure to not race with an ongoing OOM killer. Check that the
	 * current is not killed (possibly due to sharing the victim's memory).
	 */
	if (mutex_lock_killable(&oom_lock))
		return false;
	oom_killer_disabled = true;
	mutex_unlock(&oom_lock);

	ret = wait_event_interruptible_timeout(oom_victims_wait,
			!atomic_read(&oom_victims), timeout);
	if (ret <= 0) {
		oom_killer_enable();
		return false;
	}

	return true;
}
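
/*
 * Hedged usage sketch (illustrative, not from this file): a suspend-style
 * caller could bracket a critical section like this, with the timeout value
 * chosen by the caller:
 *
 *	if (!oom_killer_disable(msecs_to_jiffies(20000)))
 *		return -EBUSY;
 *	...critical section with the OOM killer disabled...
 *	oom_killer_enable();
 */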

static inline bool __task_will_free_mem(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;

	/*
	 * A coredumping process may sleep for an extended period in exit_mm(),
	 * so the oom killer cannot assume that the process will promptly exit
	 * and release memory.
	 */
	if (sig->flags & SIGNAL_GROUP_COREDUMP)
		return false;

	if (sig->flags & SIGNAL_GROUP_EXIT)
		return true;

	if (thread_group_empty(task) && (task->flags & PF_EXITING))
		return true;

	return false;
}

/*
 * Checks whether the given task is dying or exiting and likely to
 * release its address space. This means that all threads and processes
 * sharing the same mm have to be killed or exiting.
 * Caller has to make sure that task->mm is stable (hold task_lock or
 * it operates on the current).
 */
static bool task_will_free_mem(struct task_struct *task)
{
	struct mm_struct *mm = task->mm;
	struct task_struct *p;
	bool ret = true;

	/*
	 * Skip tasks without mm because it might have passed its exit_mm and
	 * exit_oom_victim. oom_reaper could have rescued that but do not rely
	 * on that for now. We can consider find_lock_task_mm in future.
	 */
	if (!mm)
		return false;

	if (!__task_will_free_mem(task))
		return false;

	/*
	 * This task has already been drained by the oom reaper so there are
	 * only small chances it will free some more
	 */
	if (test_bit(MMF_OOM_SKIP, &mm->flags))
		return false;

	if (atomic_read(&mm->mm_users) <= 1)
		return true;

	/*
	 * Make sure that all tasks which share the mm with the given task
	 * are dying as well to make sure that a) nobody pins its mm and
	 * b) the task is also reapable by the oom reaper.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(task, p))
			continue;
		ret = __task_will_free_mem(p);
		if (!ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}

static void oom_kill_process(struct oom_control *oc, const char *message)
{
	struct task_struct *p = oc->chosen;
	unsigned int points = oc->chosen_points;
	struct task_struct *victim = p;
	struct task_struct *child;
	struct task_struct *t;
	struct mm_struct *mm;
	unsigned int victim_points = 0;
	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
					      DEFAULT_RATELIMIT_BURST);
	bool can_oom_reap = true;

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just set TIF_MEMDIE so it can die quickly
	 */
	task_lock(p);
	if (task_will_free_mem(p)) {
		mark_oom_victim(p);
		wake_oom_reaper(p);
		task_unlock(p);
		put_task_struct(p);
		return;
	}
	task_unlock(p);

	if (__ratelimit(&oom_rs))
		dump_header(oc, p);

	pr_err("%s: Kill process %d (%s) score %u or sacrifice child\n",
		message, task_pid_nr(p), p->comm, points);

	/*
	 * If any of p's children has a different mm and is eligible for kill,
	 * the one with the highest oom_badness() score is sacrificed for its
	 * parent.  This attempts to lose the minimal amount of work done while
	 * still freeing memory.
	 */
	read_lock(&tasklist_lock);
	for_each_thread(p, t) {
		list_for_each_entry(child, &t->children, sibling) {
			unsigned int child_points;

			if (process_shares_mm(child, p->mm))
				continue;
			/*
			 * oom_badness() returns 0 if the thread is unkillable
			 */
			child_points = oom_badness(child,
				oc->memcg, oc->nodemask, oc->totalpages);
			if (child_points > victim_points) {
				put_task_struct(victim);
				victim = child;
				victim_points = child_points;
				get_task_struct(victim);
			}
		}
	}
	read_unlock(&tasklist_lock);

	p = find_lock_task_mm(victim);
	if (!p) {
		put_task_struct(victim);
		return;
	} else if (victim != p) {
		get_task_struct(p);
		put_task_struct(victim);
		victim = p;
	}

	/* Get a reference to safely compare mm after task_unlock(victim) */
	mm = victim->mm;
	atomic_inc(&mm->mm_count);
	/*
	 * We should send SIGKILL before setting TIF_MEMDIE in order to prevent
	 * the OOM victim from depleting the memory reserves from the user
	 * space under its control.
	 */
	do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true);
	mark_oom_victim(victim);
	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
		task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
		K(get_mm_counter(victim->mm, MM_ANONPAGES)),
		K(get_mm_counter(victim->mm, MM_FILEPAGES)),
		K(get_mm_counter(victim->mm, MM_SHMEMPAGES)));
	task_unlock(victim);

	/*
	 * Kill all user processes sharing victim->mm in other thread groups, if
	 * any.  They don't get access to memory reserves, though, to avoid
	 * depletion of all memory.  This prevents mm->mmap_sem livelock when an
	 * oom killed thread cannot exit because it requires the semaphore and
	 * it's contended by another thread trying to allocate memory itself.
	 * That thread will now get access to memory reserves since it has a
	 * pending fatal signal.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(p, victim))
			continue;
		if (is_global_init(p)) {
			can_oom_reap = false;
			set_bit(MMF_OOM_SKIP, &mm->flags);
			pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
					task_pid_nr(victim), victim->comm,
					task_pid_nr(p), p->comm);
			continue;
		}
		/*
		 * No use_mm() user needs to read from the userspace so we are
		 * ok to reap it.
		 */
		if (unlikely(p->flags & PF_KTHREAD))
			continue;
		do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true);
	}
	rcu_read_unlock();

	if (can_oom_reap)
		wake_oom_reaper(victim);

	mmdrop(mm);
	put_task_struct(victim);
}
#undef K

/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
static void check_panic_on_oom(struct oom_control *oc,
			       enum oom_constraint constraint)
{
	if (likely(!sysctl_panic_on_oom))
		return;
	if (sysctl_panic_on_oom != 2) {
		/*
		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
		 * does not panic for cpuset, mempolicy, or memcg allocation
		 * failures.
		 */
		if (constraint != CONSTRAINT_NONE)
			return;
	}
	/* Do not panic for oom kills triggered by sysrq */
	if (is_sysrq_oom(oc))
		return;
	dump_header(oc, NULL);
	panic("Out of memory: %s panic_on_oom is enabled\n",
		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);

/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @oc: pointer to struct oom_control
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR try to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
bool out_of_memory(struct oom_control *oc)
{
	unsigned long freed = 0;
	enum oom_constraint constraint = CONSTRAINT_NONE;

	if (oom_killer_disabled)
		return false;

	if (!is_memcg_oom(oc)) {
		blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
		if (freed > 0)
			/* Got some memory back in the last second. */
			return true;
	}

	/*
	 * If current has a pending SIGKILL or is exiting, then automatically
	 * select it.  The goal is to allow it to allocate so that it may
	 * quickly exit and free its memory.
	 */
	if (task_will_free_mem(current)) {
		mark_oom_victim(current);
		wake_oom_reaper(current);
		return true;
	}

	/*
	 * The OOM killer does not compensate for IO-less reclaim.
	 * pagefault_out_of_memory lost its gfp context so we have to
	 * make sure to exclude the 0 mask - all other users should have at
	 * least ___GFP_DIRECT_RECLAIM to get here.
	 */
	if (oc->gfp_mask && !(oc->gfp_mask & (__GFP_FS|__GFP_NOFAIL)))
		return true;

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA and memcg) that may require different handling.
	 */
	constraint = constrained_alloc(oc);
	if (constraint != CONSTRAINT_MEMORY_POLICY)
		oc->nodemask = NULL;
	check_panic_on_oom(oc, constraint);

	if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
	    current->mm && !oom_unkillable_task(current, NULL, oc->nodemask) &&
	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
		get_task_struct(current);
		oc->chosen = current;
		oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)");
		return true;
	}

	select_bad_process(oc);
	/* Found nothing?!?! Either we hang forever, or we panic. */
	if (!oc->chosen && !is_sysrq_oom(oc) && !is_memcg_oom(oc)) {
		dump_header(oc, NULL);
		panic("Out of memory and no killable processes...\n");
	}
	if (oc->chosen && oc->chosen != (void *)-1UL) {
		oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
				 "Memory cgroup out of memory");
		/*
		 * Give the killed process a good chance to exit before trying
		 * to allocate memory again.
		 */
		schedule_timeout_killable(1);
	}
	return !!oc->chosen;
}

/*
 * The pagefault handler calls here because it is out of memory, so kill a
 * memory-hogging task. If oom_lock is held by somebody else, a parallel oom
 * killing is already in progress so do nothing.
 */
void pagefault_out_of_memory(void)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = NULL,
		.gfp_mask = 0,
		.order = 0,
	};

	if (mem_cgroup_oom_synchronize(true))
		return;

	if (!mutex_trylock(&oom_lock))
		return;

	if (!out_of_memory(&oc)) {
		/*
		 * There shouldn't be any user tasks runnable while the
		 * OOM killer is disabled, so the current task has to
		 * be a racing OOM victim which oom_killer_disable()
		 * is waiting for.
		 */
		WARN_ON(test_thread_flag(TIF_MEMDIE));
	}

	mutex_unlock(&oom_lock);
}