/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers. It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>

int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;
static DEFINE_SPINLOCK(zone_scan_lock);

/* #define DEBUG */
#ifdef CONFIG_NUMA
/**
 * has_intersects_mems_allowed() - check task eligibility for kill
 * @tsk: task struct of which task to consider
 * @mask: nodemask passed to page allocator for mempolicy ooms
 *
 * Task eligibility is determined by whether or not a candidate task, @tsk,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 */
static bool has_intersects_mems_allowed(struct task_struct *tsk,
					const nodemask_t *mask)
{
	struct task_struct *start = tsk;

	do {
		if (mask) {
			/*
			 * If this is a mempolicy constrained oom, tsk's
			 * cpuset is irrelevant.  Only return true if its
			 * mempolicy intersects current, otherwise it may be
			 * needlessly killed.
			 */
			if (mempolicy_nodemask_intersects(tsk, mask))
				return true;
		} else {
			/*
			 * This is not a mempolicy constrained oom, so only
			 * check the mems of tsk's cpuset.
			 */
			if (cpuset_mems_allowed_intersects(current, tsk))
				return true;
		}
	} while_each_thread(start, tsk);

	return false;
}
#else
static bool has_intersects_mems_allowed(struct task_struct *tsk,
					const nodemask_t *mask)
{
	return true;
}
#endif /* CONFIG_NUMA */

/*
 * If this is a system OOM (not a memcg OOM) and the task selected to be
 * killed is not already running at high (RT) priorities, speed up the
 * recovery by boosting the dying task to the lowest FIFO priority.
 * That helps with the recovery and avoids interfering with RT tasks.
 */
static void boost_dying_task_prio(struct task_struct *p,
				  struct mem_cgroup *mem)
{
	struct sched_param param = { .sched_priority = 1 };

	if (mem)
		return;

	if (!rt_task(p))
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
}

/*
 * The process p may have detached its own ->mm while exiting or through
 * use_mm(), but one or more of its subthreads may still have a valid
 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
static struct task_struct *find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t = p;

	do {
		task_lock(t);
		if (likely(t->mm))
			return t;
		task_unlock(t);
	} while_each_thread(p, t);

	return NULL;
}
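
/*
 * Usage sketch (illustrative only, mirroring what badness() below does):
 * the returned task is locked, so the caller must drop the lock once it
 * is done looking at ->mm:
 *
 *	struct task_struct *t = find_lock_task_mm(p);
 *
 *	if (t) {
 *		unsigned long vm = t->mm->total_vm;
 *		task_unlock(t);
 *	}
 */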

/* return true if the task is not suitable as a candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p, struct mem_cgroup *mem,
			   const nodemask_t *nodemask)
{
	if (is_global_init(p))
		return true;
	if (p->flags & PF_KTHREAD)
		return true;

	/* When in mem_cgroup_out_of_memory() and p is not a member of the memcg */
	if (mem && !task_in_mem_cgroup(p, mem))
		return true;

	/* p may not have freeable memory in nodemask */
	if (!has_intersects_mems_allowed(p, nodemask))
		return true;

	return false;
}

/**
 * badness - calculate a numeric value for how bad this task has been
 * @p: task struct of the task we should calculate the badness of
 * @mem: target memory controller of a memcg OOM, or NULL
 * @nodemask: nodemask passed to the page allocator, or NULL
 * @uptime: current uptime in seconds
 *
 * The formula used is relatively simple and documented inline in the
 * function. The main rationale is that we want to select a good task
 * to kill when we run out of memory.
 *
 * Good in this context means that:
 * 1) we lose the minimum amount of work done
 * 2) we recover a large amount of memory
 * 3) we don't kill anything innocent of eating tons of memory
 * 4) we want to kill the minimum amount of processes (one)
 * 5) we try to kill the process the user expects us to kill; this
 *    algorithm has been meticulously tuned to meet the principle
 *    of least surprise ... (be careful when you change it)
 */
unsigned long badness(struct task_struct *p, struct mem_cgroup *mem,
		      const nodemask_t *nodemask, unsigned long uptime)
{
	unsigned long points, cpu_time, run_time;
	struct task_struct *child;
	struct task_struct *c, *t;
	int oom_adj = p->signal->oom_adj;
	struct task_cputime task_time;
	unsigned long utime;
	unsigned long stime;

	if (oom_unkillable_task(p, mem, nodemask))
		return 0;
	if (oom_adj == OOM_DISABLE)
		return 0;

	p = find_lock_task_mm(p);
	if (!p)
		return 0;

	/*
	 * The memory size of the process is the basis for the badness.
	 */
	points = p->mm->total_vm;
	task_unlock(p);

	/*
	 * swapoff can easily use up all memory, so kill those first.
	 */
	if (p->flags & PF_OOM_ORIGIN)
		return ULONG_MAX;

	/*
	 * Processes which fork a lot of child processes are likely
	 * a good choice. We add half the vmsize of the children if they
	 * have their own mm. This prevents forking servers from flooding
	 * the machine with an endless amount of children. In case a single
	 * child is eating the vast majority of memory, adding only half
	 * to the parent will make the child our kill candidate of choice.
	 */
	t = p;
	do {
		list_for_each_entry(c, &t->children, sibling) {
			child = find_lock_task_mm(c);
			if (child) {
				if (child->mm != p->mm)
					points += child->mm->total_vm/2 + 1;
				task_unlock(child);
			}
		}
	} while_each_thread(p, t);

	/*
	 * CPU time is in tens of seconds and run time is in thousands
	 * of seconds. There is no particular reason for this other than
	 * that it turned out to work very well in practice.
	 */
	thread_group_cputime(p, &task_time);
	utime = cputime_to_jiffies(task_time.utime);
	stime = cputime_to_jiffies(task_time.stime);
	cpu_time = (utime + stime) >> (SHIFT_HZ + 3);

	if (uptime >= p->start_time.tv_sec)
		run_time = (uptime - p->start_time.tv_sec) >> 10;
	else
		run_time = 0;

	if (cpu_time)
		points /= int_sqrt(cpu_time);
	if (run_time)
		points /= int_sqrt(int_sqrt(run_time));

	/*
	 * Niced processes are most likely less important, so double
	 * their badness points.
	 */
	if (task_nice(p) > 0)
		points *= 2;

	/*
	 * Superuser processes are usually more important, so we make it
	 * less likely that we kill those.
	 */
	if (has_capability_noaudit(p, CAP_SYS_ADMIN) ||
	    has_capability_noaudit(p, CAP_SYS_RESOURCE))
		points /= 4;

	/*
	 * We don't want to kill a process with direct hardware access.
	 * Not only could that mess up the hardware, but usually users
	 * tend to only have this flag set on applications they think
	 * of as important.
	 */
	if (has_capability_noaudit(p, CAP_SYS_RAWIO))
		points /= 4;

	/*
	 * Adjust the score by oom_adj.
	 */
	if (oom_adj) {
		if (oom_adj > 0) {
			if (!points)
				points = 1;
			points <<= oom_adj;
		} else
			points >>= -(oom_adj);
	}

#ifdef DEBUG
	printk(KERN_DEBUG "OOMkill: task %d (%s) got %lu points\n",
	p->pid, p->comm, points);
#endif
	return points;
}
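
/*
 * Worked example (hypothetical numbers, illustrative only): for a task
 * with total_vm = 262144 pages, cpu_time = 400, run_time = 16, a positive
 * nice value and oom_adj = 2, the heuristic above yields:
 *
 *	points = 262144;
 *	points /= int_sqrt(400);		// 262144 / 20 = 13107
 *	points /= int_sqrt(int_sqrt(16));	// 13107 / 2   = 6553
 *	points *= 2;				// niced       = 13106
 *	points <<= 2;				// oom_adj = 2 -> 52424
 */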

/*
 * Determine the type of allocation constraint.
 */
#ifdef CONFIG_NUMA
static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
				    gfp_t gfp_mask, nodemask_t *nodemask)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);

	/*
	 * Reach here only when __GFP_NOFAIL is used. So, we should avoid
	 * killing current; we have to kill a random task in this case.
	 * Ideally this would be CONSTRAINT_THISNODE, but there is no way
	 * to handle it now.
	 */
	if (gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;

	/*
	 * The nodemask here is a nodemask passed to alloc_pages(). Now,
	 * cpuset doesn't use this nodemask for its hardwall/softwall/hierarchy
	 * feature. mempolicy is the only user of nodemask here.
	 * Check whether mempolicy's nodemask contains all N_HIGH_MEMORY nodes.
	 */
	if (nodemask && !nodes_subset(node_states[N_HIGH_MEMORY], *nodemask))
		return CONSTRAINT_MEMORY_POLICY;

	/* Check whether this allocation failure is caused by cpuset's wall function */
	for_each_zone_zonelist_nodemask(zone, z, zonelist,
			high_zoneidx, nodemask)
		if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
			return CONSTRAINT_CPUSET;

	return CONSTRAINT_NONE;
}
#else
static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
				gfp_t gfp_mask, nodemask_t *nodemask)
{
	return CONSTRAINT_NONE;
}
#endif
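
/*
 * Sketch of a typical caller (condensed from out_of_memory() below,
 * illustrative only):
 *
 *	constraint = constrained_alloc(zonelist, gfp_mask, nodemask);
 *	check_panic_on_oom(constraint, gfp_mask, order);
 *	p = select_bad_process(&points, NULL,
 *			constraint == CONSTRAINT_MEMORY_POLICY ? nodemask :
 *								 NULL);
 */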

/*
 * Simple selection loop. We choose the process with the highest
 * number of 'points'. We expect the caller to hold tasklist_lock.
 *
 * (not docbooked, we don't want this one cluttering up the manual)
 */
static struct task_struct *select_bad_process(unsigned long *ppoints,
		struct mem_cgroup *mem, const nodemask_t *nodemask)
{
	struct task_struct *p;
	struct task_struct *chosen = NULL;
	struct timespec uptime;
	*ppoints = 0;

	do_posix_clock_monotonic_gettime(&uptime);
	for_each_process(p) {
		unsigned long points;

		if (oom_unkillable_task(p, mem, nodemask))
			continue;

		/*
		 * This task already has access to memory reserves and is
		 * being killed. Don't allow any other task access to the
		 * memory reserve.
		 *
		 * Note: this may have a chance of deadlock if it gets
		 * blocked waiting for another task which itself is waiting
		 * for memory. Is there a better alternative?
		 */
		if (test_tsk_thread_flag(p, TIF_MEMDIE))
			return ERR_PTR(-1UL);

		/*
		 * This is in the process of releasing memory so wait for it
		 * to finish before killing some other task by mistake.
		 *
		 * However, if p is the current task, we allow the 'kill' to
		 * go ahead if it is exiting: this will simply set TIF_MEMDIE,
		 * which will allow it to gain access to memory reserves in
		 * the process of exiting and releasing its resources.
		 * Otherwise we could get an easy OOM deadlock.
		 */
		if ((p->flags & PF_EXITING) && p->mm) {
			if (p != current)
				return ERR_PTR(-1UL);

			chosen = p;
			*ppoints = ULONG_MAX;
		}

		points = badness(p, mem, nodemask, uptime.tv_sec);
		if (points > *ppoints || !chosen) {
			chosen = p;
			*ppoints = points;
		}
	}

	return chosen;
}

/**
 * dump_tasks - dump current memory state of all system tasks
 * @mem: current's memory controller, if constrained
 *
 * Dumps the current memory state of all system tasks, excluding kernel threads.
 * State information includes task's pid, uid, tgid, vm size, rss, cpu, oom_adj
 * score, and name.
 *
 * If @mem is non-NULL, only tasks that are members of the mem_cgroup are
 * shown.
 *
 * Call with tasklist_lock read-locked.
 */
static void dump_tasks(const struct mem_cgroup *mem)
{
	struct task_struct *p;
	struct task_struct *task;

	printk(KERN_INFO "[ pid ]   uid  tgid total_vm      rss cpu oom_adj "
	       "name\n");
	for_each_process(p) {
		if (p->flags & PF_KTHREAD)
			continue;
		if (mem && !task_in_mem_cgroup(p, mem))
			continue;

		task = find_lock_task_mm(p);
		if (!task) {
			/*
			 * This is a kthread or all of p's threads have already
			 * detached their mm's.  There's no need to report
			 * them; they can't be oom killed anyway.
			 */
			continue;
		}

		printk(KERN_INFO "[%5d] %5d %5d %8lu %8lu %3u     %3d %s\n",
		       task->pid, __task_cred(task)->uid, task->tgid,
		       task->mm->total_vm, get_mm_rss(task->mm),
		       task_cpu(task), task->signal->oom_adj, task->comm);
		task_unlock(task);
	}
}
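
/*
 * Example output (hypothetical values):
 *
 *	[ pid ]   uid  tgid total_vm      rss cpu oom_adj name
 *	[ 1843]  1000  1843    45678     1234   2       0 firefox
 */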

static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
							struct mem_cgroup *mem)
{
	task_lock(current);
	pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
		"oom_adj=%d\n",
		current->comm, gfp_mask, order, current->signal->oom_adj);
	cpuset_print_task_mems_allowed(current);
	task_unlock(current);
	dump_stack();
	mem_cgroup_print_oom_info(mem, p);
	show_mem();
	if (sysctl_oom_dump_tasks)
		dump_tasks(mem);
}

#define K(x) ((x) << (PAGE_SHIFT-10))
static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem)
{
	/* find_lock_task_mm() returns NULL with no lock held, so there is
	 * nothing to unlock on failure. */
	p = find_lock_task_mm(p);
	if (!p)
		return 1;

	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
		task_pid_nr(p), p->comm, K(p->mm->total_vm),
		K(get_mm_counter(p->mm, MM_ANONPAGES)),
		K(get_mm_counter(p->mm, MM_FILEPAGES)));
	task_unlock(p);

	set_tsk_thread_flag(p, TIF_MEMDIE);
	force_sig(SIGKILL, p);

	/*
	 * We give our sacrificial lamb high priority and access to
	 * all the memory it needs. That way it should be able to
	 * exit() and clear out its resources quickly...
	 */
	boost_dying_task_prio(p, mem);

	return 0;
}
#undef K

static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
			    unsigned long points, struct mem_cgroup *mem,
			    nodemask_t *nodemask, const char *message)
{
	struct task_struct *victim = p;
	struct task_struct *child;
	struct task_struct *t = p;
	unsigned long victim_points = 0;
	struct timespec uptime;

	if (printk_ratelimit())
		dump_header(p, gfp_mask, order, mem);

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just set TIF_MEMDIE so it can die quickly
	 */
	if (p->flags & PF_EXITING) {
		set_tsk_thread_flag(p, TIF_MEMDIE);
		boost_dying_task_prio(p, mem);
		return 0;
	}

	task_lock(p);
	pr_err("%s: Kill process %d (%s) score %lu or sacrifice child\n",
		message, task_pid_nr(p), p->comm, points);
	task_unlock(p);

	/*
	 * If any of p's children has a different mm and is eligible for kill,
	 * the one with the highest badness() score is sacrificed for its
	 * parent.  This attempts to lose the minimal amount of work done while
	 * still freeing memory.
	 */
	do_posix_clock_monotonic_gettime(&uptime);
	do {
		list_for_each_entry(child, &t->children, sibling) {
			unsigned long child_points;

			/* badness() returns 0 if the thread is unkillable */
			child_points = badness(child, mem, nodemask,
					       uptime.tv_sec);
			if (child_points > victim_points) {
				victim = child;
				victim_points = child_points;
			}
		}
	} while_each_thread(p, t);

	return oom_kill_task(victim, mem);
}

/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
				int order)
{
	if (likely(!sysctl_panic_on_oom))
		return;
	if (sysctl_panic_on_oom != 2) {
		/*
		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
		 * does not panic for cpuset, mempolicy, or memcg allocation
		 * failures.
		 */
		if (constraint != CONSTRAINT_NONE)
			return;
	}
	read_lock(&tasklist_lock);
	dump_header(NULL, gfp_mask, order, NULL);
	read_unlock(&tasklist_lock);
	panic("Out of memory: %s panic_on_oom is enabled\n",
		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}
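
/*
 * Example (userspace, illustrative): selecting the panic behaviour via
 * the sysctl checked above:
 *
 *	echo 0 > /proc/sys/vm/panic_on_oom  # never panic, kill a task (default)
 *	echo 1 > /proc/sys/vm/panic_on_oom  # panic on system-wide OOMs only
 *	echo 2 > /proc/sys/vm/panic_on_oom  # panic on any OOM, even constrained
 */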

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
{
	unsigned long points = 0;
	struct task_struct *p;

	check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0);
	read_lock(&tasklist_lock);
retry:
	p = select_bad_process(&points, mem, NULL);
	if (!p || PTR_ERR(p) == -1UL)
		goto out;

	if (oom_kill_process(p, gfp_mask, 0, points, mem, NULL,
				"Memory cgroup out of memory"))
		goto retry;
out:
	read_unlock(&tasklist_lock);
}
#endif

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);
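
/*
 * Minimal client sketch (hypothetical module code): a callback on this
 * chain runs before a kill and may add the number of pages it freed to
 * *parm, which out_of_memory() below checks before killing anything:
 *
 *	static int my_oom_notify(struct notifier_block *nb,
 *				 unsigned long unused, void *parm)
 *	{
 *		unsigned long *freed = parm;
 *
 *		*freed += shrink_my_private_cache();	// hypothetical helper
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_oom_nb = {
 *		.notifier_call = my_oom_notify,
 *	};
 *
 *	register_oom_notifier(&my_oom_nb);
 */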

/*
 * Try to acquire the OOM killer lock for the zones in zonelist.  Returns zero
 * if a parallel OOM killing is already taking place that includes a zone in
 * the zonelist.  Otherwise, locks all zones in the zonelist and returns 1.
 */
int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
	struct zoneref *z;
	struct zone *zone;
	int ret = 1;

	spin_lock(&zone_scan_lock);
	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		if (zone_is_oom_locked(zone)) {
			ret = 0;
			goto out;
		}
	}

	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		/*
		 * Lock each zone in the zonelist under zone_scan_lock so a
		 * parallel invocation of try_set_zonelist_oom() doesn't succeed
		 * when it shouldn't.
		 */
		zone_set_flag(zone, ZONE_OOM_LOCKED);
	}

out:
	spin_unlock(&zone_scan_lock);
	return ret;
}

/*
 * Clears the ZONE_OOM_LOCKED flag for all zones in the zonelist so that failed
 * allocation attempts with zonelists containing them may now recall the OOM
 * killer, if necessary.
 */
void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
	struct zoneref *z;
	struct zone *zone;

	spin_lock(&zone_scan_lock);
	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		zone_clear_flag(zone, ZONE_OOM_LOCKED);
	}
	spin_unlock(&zone_scan_lock);
}
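
/*
 * Typical pairing for the zonelist lock (sketch; the real caller lives
 * in mm/page_alloc.c):
 *
 *	if (!try_set_zonelist_oom(zonelist, gfp_mask))
 *		return;		// parallel OOM kill already in progress
 *	out_of_memory(zonelist, gfp_mask, order, nodemask);
 *	clear_zonelist_oom(zonelist, gfp_mask);
 */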

/*
 * Try to acquire the oom killer lock for all system zones.  Returns zero if a
 * parallel oom killing is taking place, otherwise locks all zones and returns
 * non-zero.
 */
static int try_set_system_oom(void)
{
	struct zone *zone;
	int ret = 1;

	spin_lock(&zone_scan_lock);
	for_each_populated_zone(zone)
		if (zone_is_oom_locked(zone)) {
			ret = 0;
			goto out;
		}
	for_each_populated_zone(zone)
		zone_set_flag(zone, ZONE_OOM_LOCKED);
out:
	spin_unlock(&zone_scan_lock);
	return ret;
}

/*
 * Clears ZONE_OOM_LOCKED for all system zones so that failed allocation
 * attempts or page faults may now recall the oom killer, if necessary.
 */
static void clear_system_oom(void)
{
	struct zone *zone;

	spin_lock(&zone_scan_lock);
	for_each_populated_zone(zone)
		zone_clear_flag(zone, ZONE_OOM_LOCKED);
	spin_unlock(&zone_scan_lock);
}

/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @zonelist: zonelist pointer
 * @gfp_mask: memory allocation flags
 * @order: amount of memory being requested as a power of 2
 * @nodemask: nodemask passed to page allocator
 *
 * If we run out of memory, we have the choice of killing a random
 * task (bad), letting the system crash (worse), or trying to be smart
 * about which process to kill. Note that we don't have to be perfect
 * here; we just have to be good.
 */
void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
		int order, nodemask_t *nodemask)
{
	struct task_struct *p;
	unsigned long freed = 0;
	unsigned long points;
	enum oom_constraint constraint = CONSTRAINT_NONE;

	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
	if (freed > 0)
		/* Got some memory back in the last second. */
		return;

	/*
	 * If current has a pending SIGKILL, then automatically select it.  The
	 * goal is to allow it to allocate so that it may quickly exit and free
	 * its memory.
	 */
	if (fatal_signal_pending(current)) {
		set_thread_flag(TIF_MEMDIE);
		boost_dying_task_prio(current, NULL);
		return;
	}

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA) that may require different handling.
	 */
	if (zonelist)
		constraint = constrained_alloc(zonelist, gfp_mask, nodemask);
	check_panic_on_oom(constraint, gfp_mask, order);

	read_lock(&tasklist_lock);
	if (sysctl_oom_kill_allocating_task &&
	    !oom_unkillable_task(current, NULL, nodemask) &&
	    (current->signal->oom_adj != OOM_DISABLE)) {
		/*
		 * oom_kill_process() needs tasklist_lock held.  If it returns
		 * non-zero, current could not be killed so we must fall back
		 * to the tasklist scan.
		 */
		if (!oom_kill_process(current, gfp_mask, order, 0, NULL,
				nodemask,
				"Out of memory (oom_kill_allocating_task)"))
			return;
	}

retry:
	p = select_bad_process(&points, NULL,
			constraint == CONSTRAINT_MEMORY_POLICY ? nodemask :
								 NULL);
	if (PTR_ERR(p) == -1UL)
		return;

	/* Found nothing?!?! Either we hang forever, or we panic. */
	if (!p) {
		dump_header(NULL, gfp_mask, order, NULL);
		read_unlock(&tasklist_lock);
		panic("Out of memory and no killable processes...\n");
	}

	if (oom_kill_process(p, gfp_mask, order, points, NULL, nodemask,
			     "Out of memory"))
		goto retry;
	read_unlock(&tasklist_lock);

	/*
	 * Give "p" a good chance of killing itself before we
	 * retry to allocate memory unless "p" is current
	 */
	if (!test_thread_flag(TIF_MEMDIE))
		schedule_timeout_uninterruptible(1);
}

/*
 * The pagefault handler calls here because it is out of memory, so kill a
 * memory-hogging task.  If a populated zone has ZONE_OOM_LOCKED set, a parallel
 * oom killing is already in progress so do nothing.  If a task is found with
 * TIF_MEMDIE set, it has been killed so do nothing and allow it to exit.
 */
void pagefault_out_of_memory(void)
{
	if (try_set_system_oom()) {
		out_of_memory(NULL, 0, 0, NULL);
		clear_system_oom();
	}
	if (!test_thread_flag(TIF_MEMDIE))
		schedule_timeout_uninterruptible(1);
}