/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers. It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
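
/* OOM tunables, exposed under /proc/sys/vm/. */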
int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;
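
/* Serializes ZONE_OOM_LOCKED updates in the OOM locking helpers below. */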
static DEFINE_SPINLOCK(zone_scan_lock);
/* #define DEBUG */

#ifdef CONFIG_NUMA
/**
 * has_intersects_mems_allowed() - check task eligibility for kill
 * @tsk: task struct of which task to consider
 * @mask: nodemask passed to page allocator for mempolicy ooms
 *
 * Task eligibility is determined by whether or not a candidate task, @tsk,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 */
static bool has_intersects_mems_allowed(struct task_struct *tsk,
					const nodemask_t *mask)
{
	struct task_struct *start = tsk;

	do {
		if (mask) {
			/*
			 * If this is a mempolicy constrained oom, tsk's
			 * cpuset is irrelevant.  Only return true if its
			 * mempolicy intersects current, otherwise it may be
			 * needlessly killed.
			 */
			if (mempolicy_nodemask_intersects(tsk, mask))
				return true;
		} else {
			/*
			 * This is not a mempolicy constrained oom, so only
			 * check the mems of tsk's cpuset.
			 */
			if (cpuset_mems_allowed_intersects(current, tsk))
				return true;
		}
		tsk = next_thread(tsk);
	} while (tsk != start);
	return false;
}
#else
static bool has_intersects_mems_allowed(struct task_struct *tsk,
					const nodemask_t *mask)
{
	return true;
}
#endif /* CONFIG_NUMA */

/*
 * The process p may have detached its own ->mm while exiting or through
 * use_mm(), but one or more of its subthreads may still have a valid
 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
static struct task_struct *find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t = p;

	do {
		task_lock(t);
		if (likely(t->mm))
			return t;
		task_unlock(t);
	} while_each_thread(p, t);

	return NULL;
}

/**
 * badness - calculate a numeric value for how bad this task has been
 * @p: task struct of the task whose badness we calculate
 * @uptime: current uptime in seconds
 *
 * The formula used is relatively simple and documented inline in the
 * function. The main rationale is that we want to select a good task
 * to kill when we run out of memory.
 *
 * Good in this context means that:
 * 1) we lose the minimum amount of work done
 * 2) we recover a large amount of memory
 * 3) we don't kill anything innocent of eating tons of memory
 * 4) we want to kill the minimum number of processes (one)
 * 5) we try to kill the process the user expects us to kill, this
 *    algorithm has been meticulously tuned to meet the principle
 *    of least surprise ... (be careful when you change it)
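 *
 * Roughly: points starts at the task's total_vm, plus half of each
 * child's total_vm (for children with their own mm); it is divided by
 * int_sqrt(cpu_time) and int_sqrt(int_sqrt(run_time)), doubled for
 * positively niced tasks, quartered for CAP_SYS_ADMIN/CAP_SYS_RESOURCE
 * and again for CAP_SYS_RAWIO, and finally shifted by oom_adj.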
 */

unsigned long badness(struct task_struct *p, unsigned long uptime)
{
	unsigned long points, cpu_time, run_time;
	struct task_struct *child;
	struct task_struct *c, *t;
	int oom_adj = p->signal->oom_adj;
	struct task_cputime task_time;
	unsigned long utime;
	unsigned long stime;

	if (oom_adj == OOM_DISABLE)
		return 0;

	p = find_lock_task_mm(p);
	if (!p)
		return 0;

	/*
	 * The memory size of the process is the basis for the badness.
	 */
	points = p->mm->total_vm;
	task_unlock(p);

	/*
	 * swapoff can easily use up all memory, so kill those first.
	 */
	if (p->flags & PF_OOM_ORIGIN)
		return ULONG_MAX;

	/*
	 * Processes which fork a lot of child processes are likely
	 * a good choice. We add half the vmsize of the children if they
	 * have their own mm. This prevents forking servers from flooding
	 * the machine with an endless number of children. In case a
	 * single child is eating the vast majority of memory, adding
	 * only half to the parent makes the child our kill candidate
	 * of choice.
	 */
	t = p;
	do {
		list_for_each_entry(c, &t->children, sibling) {
			child = find_lock_task_mm(c);
			if (child) {
				if (child->mm != p->mm)
					points += child->mm->total_vm/2 + 1;
				task_unlock(child);
			}
		}
	} while_each_thread(p, t);

	/*
	 * CPU time is in tens of seconds and run time is in thousands
	 * of seconds. There is no particular reason for this other than
	 * that it turned out to work very well in practice.
	 */
	thread_group_cputime(p, &task_time);
	utime = cputime_to_jiffies(task_time.utime);
	stime = cputime_to_jiffies(task_time.stime);
	cpu_time = (utime + stime) >> (SHIFT_HZ + 3);

	if (uptime >= p->start_time.tv_sec)
		run_time = (uptime - p->start_time.tv_sec) >> 10;
	else
		run_time = 0;

	if (cpu_time)
		points /= int_sqrt(cpu_time);
	if (run_time)
		points /= int_sqrt(int_sqrt(run_time));

	/*
	 * Niced processes are most likely less important, so double
	 * their badness points.
	 */
	if (task_nice(p) > 0)
		points *= 2;

	/*
	 * Superuser processes are usually more important, so we make it
	 * less likely that we kill those.
	 */
	if (has_capability_noaudit(p, CAP_SYS_ADMIN) ||
	    has_capability_noaudit(p, CAP_SYS_RESOURCE))
		points /= 4;

	/*
	 * We don't want to kill a process with direct hardware access.
	 * Not only could that mess up the hardware, but usually users
	 * tend to only have this flag set on applications they think
	 * of as important.
	 */
	if (has_capability_noaudit(p, CAP_SYS_RAWIO))
		points /= 4;

	/*
	 * Adjust the score by oom_adj: it is used as a bit-shift count,
	 * so each positive step doubles the score and each negative step
	 * halves it.
	 */
	if (oom_adj) {
		if (oom_adj > 0) {
			if (!points)
				points = 1;
			points <<= oom_adj;
		} else
			points >>= -(oom_adj);
	}

#ifdef DEBUG
	printk(KERN_DEBUG "OOMkill: task %d (%s) got %lu points\n",
	p->pid, p->comm, points);
#endif
	return points;
}

/*
 * Determine the type of allocation constraint.
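 * Returns CONSTRAINT_MEMORY_POLICY if a mempolicy's nodemask restricted
 * the allocation, CONSTRAINT_CPUSET if a cpuset's mems did, and
 * CONSTRAINT_NONE otherwise.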
 */
#ifdef CONFIG_NUMA
static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
				    gfp_t gfp_mask, nodemask_t *nodemask)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);

	/*
	 * We reach here (with __GFP_THISNODE) only when __GFP_NOFAIL is
	 * used, so we should avoid killing current; a random task will be
	 * killed in this case. Ideally this would be CONSTRAINT_THISNODE,
	 * but there is no way to handle that yet.
	 */
	if (gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;
	/*
	 * The nodemask here is one passed to alloc_pages(). Cpuset doesn't
	 * use this nodemask for its hardwall/softwall/hierarchy feature,
	 * so mempolicy is its only user here. Check whether mempolicy's
	 * nodemask contains all of the N_HIGH_MEMORY nodes.
	 */
	if (nodemask && !nodes_subset(node_states[N_HIGH_MEMORY], *nodemask))
		return CONSTRAINT_MEMORY_POLICY;

	/* Check whether this allocation failure was caused by cpuset's wall function */
	for_each_zone_zonelist_nodemask(zone, z, zonelist,
			high_zoneidx, nodemask)
		if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
			return CONSTRAINT_CPUSET;

	return CONSTRAINT_NONE;
}
#else
static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
				gfp_t gfp_mask, nodemask_t *nodemask)
{
	return CONSTRAINT_NONE;
}
#endif
/*
 * Simple selection loop. We choose the process with the highest
 * number of 'points'. We expect the caller will lock the tasklist.
 *
 * (not docbooked, we don't want this one cluttering up the manual)
 */
static struct task_struct *select_bad_process(unsigned long *ppoints,
		struct mem_cgroup *mem, const nodemask_t *nodemask)
{
	struct task_struct *p;
	struct task_struct *chosen = NULL;
	struct timespec uptime;
	*ppoints = 0;

	do_posix_clock_monotonic_gettime(&uptime);
	for_each_process(p) {
		unsigned long points;

		/* skip the init task and kthreads */
		if (is_global_init(p) || (p->flags & PF_KTHREAD))
			continue;
		if (mem && !task_in_mem_cgroup(p, mem))
			continue;
		if (!has_intersects_mems_allowed(p, nodemask))
			continue;

		/*
		 * This task already has access to memory reserves and is
		 * being killed. Don't allow any other task access to the
		 * memory reserve.
		 *
		 * Note: this may have a chance of deadlock if it gets
		 * blocked waiting for another task which itself is waiting
		 * for memory. Is there a better alternative?
		 */
		if (test_tsk_thread_flag(p, TIF_MEMDIE))
			return ERR_PTR(-1UL);

		/*
		 * This is in the process of releasing memory so wait for it
		 * to finish before killing some other task by mistake.
		 *
		 * However, if p is the current task, we allow the 'kill' to
		 * go ahead if it is exiting: this will simply set TIF_MEMDIE,
		 * which will allow it to gain access to memory reserves in
		 * the process of exiting and releasing its resources.
		 * Otherwise we could get an easy OOM deadlock.
		 */
		if ((p->flags & PF_EXITING) && p->mm) {
			if (p != current)
				return ERR_PTR(-1UL);

			chosen = p;
			*ppoints = ULONG_MAX;
		}

		if (p->signal->oom_adj == OOM_DISABLE)
			continue;

		points = badness(p, uptime.tv_sec);
		if (points > *ppoints || !chosen) {
			chosen = p;
			*ppoints = points;
		}
	}
	return chosen;
}

/**
 * dump_tasks - dump current memory state of all system tasks
 * @mem: current's memory controller, if constrained
 *
 * Dumps the current memory state of all system tasks, excluding kernel threads.
 * State information includes task's pid, uid, tgid, vm size, rss, cpu, oom_adj
 * score, and name.
 *
 * If @mem is non-NULL, only tasks that are members of that mem_cgroup are
 * shown.
 *
 * Call with tasklist_lock read-locked.
 */
static void dump_tasks(const struct mem_cgroup *mem)
{
	struct task_struct *p;
	struct task_struct *task;

	printk(KERN_INFO "[ pid ]   uid  tgid total_vm      rss cpu oom_adj "
	       "name\n");
	for_each_process(p) {
		if (p->flags & PF_KTHREAD)
			continue;
		if (mem && !task_in_mem_cgroup(p, mem))
			continue;

		task = find_lock_task_mm(p);
		if (!task) {
			/*
			 * This is a kthread or all of p's threads have already
			 * detached their mm's.  There's no need to report
			 * them; they can't be oom killed anyway.
			 */
			continue;
		}

		printk(KERN_INFO "[%5d] %5d %5d %8lu %8lu %3u     %3d %s\n",
		       task->pid, __task_cred(task)->uid, task->tgid,
		       task->mm->total_vm, get_mm_rss(task->mm),
		       task_cpu(task), task->signal->oom_adj, task->comm);
		task_unlock(task);
	}
}

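/*
 * Log details of the failed allocation, current's state, a stack trace,
 * overall memory info and, if sysctl_oom_dump_tasks is set, the task list.
 */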
static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
							struct mem_cgroup *mem)
{
	task_lock(current);
	pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
		"oom_adj=%d\n",
		current->comm, gfp_mask, order, current->signal->oom_adj);
	cpuset_print_task_mems_allowed(current);
	task_unlock(current);
	dump_stack();
	mem_cgroup_print_oom_info(mem, p);
	show_mem();
	if (sysctl_oom_dump_tasks)
		dump_tasks(mem);
}

#define K(x) ((x) << (PAGE_SHIFT-10))	/* pages to KiB */
static int oom_kill_task(struct task_struct *p)
{
	p = find_lock_task_mm(p);
	if (!p)
		return 1;
	if (p->signal->oom_adj == OOM_DISABLE) {
		task_unlock(p);
		return 1;
	}
	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
		task_pid_nr(p), p->comm, K(p->mm->total_vm),
		K(get_mm_counter(p->mm, MM_ANONPAGES)),
		K(get_mm_counter(p->mm, MM_FILEPAGES)));
	task_unlock(p);

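	/*
	 * Give the victim a full time slice and, via TIF_MEMDIE, access to
	 * memory reserves so that it can exit and free its memory quickly.
	 */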
	p->rt.time_slice = HZ;
	set_tsk_thread_flag(p, TIF_MEMDIE);
	force_sig(SIGKILL, p);
	return 0;
}
#undef K

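/*
 * Kill @p, preferring to sacrifice a child with a separate mm and the
 * highest badness score so that the least amount of work done is lost.
 */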
static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
			    unsigned long points, struct mem_cgroup *mem,
			    const char *message)
{
	struct task_struct *victim = p;
	struct task_struct *child;
	struct task_struct *t = p;
	unsigned long victim_points = 0;
	struct timespec uptime;

	if (printk_ratelimit())
		dump_header(p, gfp_mask, order, mem);

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just set TIF_MEMDIE so it can die quickly
	 */
	if (p->flags & PF_EXITING) {
		set_tsk_thread_flag(p, TIF_MEMDIE);
		return 0;
	}

	task_lock(p);
	pr_err("%s: Kill process %d (%s) score %lu or sacrifice child\n",
		message, task_pid_nr(p), p->comm, points);
	task_unlock(p);
	/*
	 * If any of p's children has a different mm and is eligible for kill,
	 * the one with the highest badness() score is sacrificed for its
	 * parent.  This attempts to lose the minimal amount of work done while
	 * still freeing memory.
	 */
	do_posix_clock_monotonic_gettime(&uptime);
	do {
		list_for_each_entry(child, &t->children, sibling) {
			unsigned long child_points;

			if (child->mm == p->mm)
				continue;
			if (mem && !task_in_mem_cgroup(child, mem))
				continue;

			/* badness() returns 0 if the thread is unkillable */
			child_points = badness(child, uptime.tv_sec);
			if (child_points > victim_points) {
				victim = child;
				victim_points = child_points;
			}
		}
	} while_each_thread(p, t);

	return oom_kill_task(victim);
}

/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
				int order)
{
	if (likely(!sysctl_panic_on_oom))
		return;
	if (sysctl_panic_on_oom != 2) {
		/*
		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
		 * does not panic for cpuset, mempolicy, or memcg allocation
		 * failures.
		 */
		if (constraint != CONSTRAINT_NONE)
			return;
	}
	read_lock(&tasklist_lock);
	dump_header(NULL, gfp_mask, order, NULL);
	read_unlock(&tasklist_lock);
	panic("Out of memory: %s panic_on_oom is enabled\n",
		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
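/*
 * Called when a memory controller hits its limit: select and kill the
 * worst task in @mem, retrying while the chosen victim cannot be killed.
 */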
void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
{
	unsigned long points = 0;
	struct task_struct *p;

	check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0);
	read_lock(&tasklist_lock);
retry:
	p = select_bad_process(&points, mem, NULL);
	if (!p || PTR_ERR(p) == -1UL)
		goto out;

	if (oom_kill_process(p, gfp_mask, 0, points, mem,
				"Memory cgroup out of memory"))
		goto retry;
out:
	read_unlock(&tasklist_lock);
}
#endif

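/*
 * OOM notifier chain: registered callbacks get a chance to free memory
 * before a victim is chosen; if they free anything, out_of_memory()
 * returns without killing.
 */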
static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);

/*
 * Try to acquire the OOM killer lock for the zones in zonelist.  Returns zero
 * if a parallel OOM killing is already taking place that includes a zone in
 * the zonelist.  Otherwise, locks all zones in the zonelist and returns 1.
 */
int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
	struct zoneref *z;
	struct zone *zone;
	int ret = 1;

	spin_lock(&zone_scan_lock);
	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		if (zone_is_oom_locked(zone)) {
			ret = 0;
			goto out;
		}
	}

	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		/*
		 * Lock each zone in the zonelist under zone_scan_lock so a
		 * parallel invocation of try_set_zonelist_oom() doesn't succeed
		 * when it shouldn't.
		 */
		zone_set_flag(zone, ZONE_OOM_LOCKED);
	}

out:
	spin_unlock(&zone_scan_lock);
	return ret;
}

/*
 * Clears the ZONE_OOM_LOCKED flag for all zones in the zonelist so that failed
 * allocation attempts with zonelists containing them may now recall the OOM
 * killer, if necessary.
 */
void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
	struct zoneref *z;
	struct zone *zone;

	spin_lock(&zone_scan_lock);
	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		zone_clear_flag(zone, ZONE_OOM_LOCKED);
	}
	spin_unlock(&zone_scan_lock);
}

/*
 * Try to acquire the oom killer lock for all system zones.  Returns zero if a
 * parallel oom killing is taking place, otherwise locks all zones and returns
 * non-zero.
 */
static int try_set_system_oom(void)
{
	struct zone *zone;
	int ret = 1;

	spin_lock(&zone_scan_lock);
	for_each_populated_zone(zone)
		if (zone_is_oom_locked(zone)) {
			ret = 0;
			goto out;
		}
	for_each_populated_zone(zone)
		zone_set_flag(zone, ZONE_OOM_LOCKED);
out:
	spin_unlock(&zone_scan_lock);
	return ret;
}

/*
 * Clears ZONE_OOM_LOCKED for all system zones so that failed allocation
 * attempts or page faults may now recall the oom killer, if necessary.
 */
static void clear_system_oom(void)
{
	struct zone *zone;

	spin_lock(&zone_scan_lock);
	for_each_populated_zone(zone)
		zone_clear_flag(zone, ZONE_OOM_LOCKED);
	spin_unlock(&zone_scan_lock);
}

/*
 * Must be called with tasklist_lock held for read.
 */
static void __out_of_memory(gfp_t gfp_mask, int order, const nodemask_t *mask)
{
	struct task_struct *p;
	unsigned long points;

	if (sysctl_oom_kill_allocating_task)
		if (!oom_kill_process(current, gfp_mask, order, 0, NULL,
				"Out of memory (oom_kill_allocating_task)"))
			return;
retry:
	/*
	 * Rambo mode: Shoot down a process and hope it solves whatever
	 * issues we may have.
	 */
	p = select_bad_process(&points, NULL, mask);

	if (PTR_ERR(p) == -1UL)
		return;
	/* Found nothing?!?! Either we hang forever, or we panic. */
	if (!p) {
		dump_header(NULL, gfp_mask, order, NULL);
		read_unlock(&tasklist_lock);
		panic("Out of memory and no killable processes...\n");
	}

	if (oom_kill_process(p, gfp_mask, order, points, NULL,
			     "Out of memory"))
		goto retry;
}

/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @zonelist: zonelist pointer
 * @gfp_mask: memory allocation flags
 * @order: amount of memory being requested as a power of 2
 * @nodemask: nodemask passed to page allocator
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR try to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
		int order, nodemask_t *nodemask)
{
	unsigned long freed = 0;
	enum oom_constraint constraint = CONSTRAINT_NONE;

	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
	if (freed > 0)
		/* Got some memory back in the last second. */
		return;
	/*
	 * If current has a pending SIGKILL, then automatically select it.  The
	 * goal is to allow it to allocate so that it may quickly exit and free
	 * its memory.
	 */
	if (fatal_signal_pending(current)) {
		set_thread_flag(TIF_MEMDIE);
		return;
	}

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA) that may require different handling.
	 */
	if (zonelist)
		constraint = constrained_alloc(zonelist, gfp_mask, nodemask);
	check_panic_on_oom(constraint, gfp_mask, order);
	read_lock(&tasklist_lock);
	__out_of_memory(gfp_mask, order,
			constraint == CONSTRAINT_MEMORY_POLICY ? nodemask :
								 NULL);
	read_unlock(&tasklist_lock);

	/*
	 * Give the killed task a good chance of exiting before we retry
	 * the allocation, unless the killed task is current itself.
	 */
	if (!test_thread_flag(TIF_MEMDIE))
		schedule_timeout_uninterruptible(1);
}

/*
 * The pagefault handler calls here because it is out of memory, so kill a
 * memory-hogging task.  If a populated zone has ZONE_OOM_LOCKED set, a parallel
 * oom killing is already in progress so do nothing.  If a task is found with
 * TIF_MEMDIE set, it has been killed so do nothing and allow it to exit.
 */
void pagefault_out_of_memory(void)
{
	if (try_set_system_oom()) {
		out_of_memory(NULL, 0, 0, NULL);
		clear_system_oom();
	}
	if (!test_thread_flag(TIF_MEMDIE))
		schedule_timeout_uninterruptible(1);
}