/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/mm_inline.h>
#include <linux/page_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include "internal.h"
#include <net/sock.h>
#include <net/tcp_memcontrol.h>

#include <asm/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys mem_cgroup_subsys __read_mostly;
#define MEM_CGROUP_RECLAIM_RETRIES	5
static struct mem_cgroup *root_mem_cgroup __read_mostly;

#ifdef CONFIG_MEMCG_SWAP
/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
int do_swap_account __read_mostly;

/* for remembering the boot option */
#ifdef CONFIG_MEMCG_SWAP_ENABLED
static int really_do_swap_account __initdata = 1;
#else
static int really_do_swap_account __initdata = 0;
#endif

#else
#define do_swap_account		0
#endif


/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE, 	   /* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,	   /* # of pages charged as anon rss */
	MEM_CGROUP_STAT_FILE_MAPPED,  /* # of pages charged as file rss */
	MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */
	MEM_CGROUP_STAT_NSTATS,
};

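/* Human-readable names for the counters above, in enum mem_cgroup_stat_index order. */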
static const char * const mem_cgroup_stat_names[] = {
	"cache",
	"rss",
	"mapped_file",
	"swap",
};

enum mem_cgroup_events_index {
	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
	MEM_CGROUP_EVENTS_PGFAULT,	/* # of page-faults */
	MEM_CGROUP_EVENTS_PGMAJFAULT,	/* # of major page-faults */
	MEM_CGROUP_EVENTS_NSTATS,
};

static const char * const mem_cgroup_events_names[] = {
	"pgpgin",
	"pgpgout",
	"pgfault",
	"pgmajfault",
};

/*
 * Per-memcg event counter is incremented at every pagein/pageout. With THP,
 * it will be incremented by the number of pages. This counter is used
 * to trigger some periodic events. This is straightforward and better
 * than using jiffies etc. to handle periodic memcg events.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_TARGET_NUMAINFO,
	MEM_CGROUP_NTARGETS,
};
#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024
#define NUMAINFO_EVENTS_TARGET	1024

struct mem_cgroup_stat_cpu {
	long count[MEM_CGROUP_STAT_NSTATS];
	unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct mem_cgroup_reclaim_iter {
	/* css_id of the last scanned hierarchy member */
	int position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

/*
 * per-zone information in memory controller.
 */
struct mem_cgroup_per_zone {
	struct lruvec		lruvec;
	unsigned long		lru_size[NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];

	struct rb_node		tree_node;	/* RB tree node */
	unsigned long long	usage_in_excess;/* Set to the value by which */
						/* the soft limit is exceeded*/
	bool			on_tree;
	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
						/* use container_of	   */
};

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

struct mem_cgroup_lru_info {
	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
};

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_zone {
	struct rb_root rb_root;
	spinlock_t lock;
};

struct mem_cgroup_tree_per_node {
	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	u64 threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[0];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;

	union {
		/*
		 * the counter to account for mem+swap usage.
		 */
		struct res_counter memsw;

		/*
		 * rcu_freeing is used only when freeing struct mem_cgroup,
		 * so put it into a union to avoid wasting more memory.
		 * It must be disjoint from the css field.  It could be
		 * in a union with the res field, but res plays a much
		 * larger part in mem_cgroup life than memsw, and might
		 * be of interest, even at time of free, when debugging.
		 * So share rcu_head with the less interesting memsw.
		 */
		struct rcu_head rcu_freeing;
		/*
		 * We also need some space for a worker in deferred freeing.
		 * By the time we call it, rcu_freeing is no longer in use.
		 */
		struct work_struct work_freeing;
	};

	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
	 */
	struct mem_cgroup_lru_info info;
	int last_scanned_node;
#if MAX_NUMNODES > 1
	nodemask_t	scan_nodes;
	atomic_t	numainfo_events;
	atomic_t	numainfo_updating;
#endif
	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;

	bool		oom_lock;
	atomic_t	under_oom;

	atomic_t	refcnt;

	int	swappiness;
	/* OOM-Killer disable */
	int		oom_kill_disable;

	/* set when res.limit == memsw.limit */
	bool		memsw_is_minimum;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup ? And what type of charges should we move ?
	 */
	unsigned long 	move_charge_at_immigrate;
	/*
	 * set > 0 if pages under this cgroup are moving to other cgroup.
	 */
	atomic_t	moving_account;
	/* taken only while moving_account > 0 */
	spinlock_t	move_lock;
	/*
	 * percpu counter.
	 */
	struct mem_cgroup_stat_cpu __percpu *stat;
	/*
	 * used when a cpu is offlined or other synchronizations
	 * See mem_cgroup_read_stat().
	 */
	struct mem_cgroup_stat_cpu nocpu_base;
	spinlock_t pcp_counter_lock;

#ifdef CONFIG_INET
	struct tcp_memcontrol tcp_mem;
#endif
};

/* Stuff for move charges at task migration. */
/*
 * Types of charges to be moved. "move_charge_at_immigrate" is treated as a
 * left-shifted bitmap of these types.
 */
enum move_type {
	MOVE_CHARGE_TYPE_ANON,	/* private anonymous page and swap of it */
	MOVE_CHARGE_TYPE_FILE,	/* file page(including tmpfs) and swap of it */
	NR_MOVE_TYPE,
};

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

static bool move_anon(void)
{
	return test_bit(MOVE_CHARGE_TYPE_ANON,
					&mc.to->move_charge_at_immigrate);
}

static bool move_file(void)
{
	return test_bit(MOVE_CHARGE_TYPE_FILE,
					&mc.to->move_charge_at_immigrate);
}

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_ANON,
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
#define _MEM			(0)
#define _MEMSWAP		(1)
#define _OOM_TYPE		(2)
#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
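/*
 * Example (sketch): a cft->private built as MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT)
 * keeps the counter type in the upper 16 bits and the attribute in the lower
 * 16 bits; MEMFILE_TYPE() and MEMFILE_ATTR() recover the two halves.
 */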
/* Used for OOM notifier */
#define OOM_CONTROL		(0)

/*
 * Reclaim flags for mem_cgroup_hierarchical_reclaim
 */
#define MEM_CGROUP_RECLAIM_NOSWAP_BIT	0x0
#define MEM_CGROUP_RECLAIM_NOSWAP	(1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
#define MEM_CGROUP_RECLAIM_SHRINK_BIT	0x1
#define MEM_CGROUP_RECLAIM_SHRINK	(1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)

static void mem_cgroup_get(struct mem_cgroup *memcg);
static void mem_cgroup_put(struct mem_cgroup *memcg);
G
Glauber Costa 已提交
407 408

/* Writing them here to avoid exposing memcg's inner layout */
A
Andrew Morton 已提交
409
#ifdef CONFIG_MEMCG_KMEM
G
Glauber Costa 已提交
410
#include <net/sock.h>
G
Glauber Costa 已提交
411
#include <net/ip.h>
G
Glauber Costa 已提交
412 413 414 415

static bool mem_cgroup_is_root(struct mem_cgroup *memcg);
void sock_update_memcg(struct sock *sk)
{
416
	if (mem_cgroup_sockets_enabled) {
G
Glauber Costa 已提交
417
		struct mem_cgroup *memcg;
418
		struct cg_proto *cg_proto;
G
Glauber Costa 已提交
419 420 421

		BUG_ON(!sk->sk_prot->proto_cgroup);

422 423 424 425 426 427 428 429 430 431 432 433 434 435
		/* Socket cloning can throw us here with sk_cgrp already
		 * filled. It won't however, necessarily happen from
		 * process context. So the test for root memcg given
		 * the current task's memcg won't help us in this case.
		 *
		 * Respecting the original socket's memcg is a better
		 * decision in this case.
		 */
		if (sk->sk_cgrp) {
			BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
			mem_cgroup_get(sk->sk_cgrp->memcg);
			return;
		}

G
Glauber Costa 已提交
436 437
		rcu_read_lock();
		memcg = mem_cgroup_from_task(current);
438 439
		cg_proto = sk->sk_prot->proto_cgroup(memcg);
		if (!mem_cgroup_is_root(memcg) && memcg_proto_active(cg_proto)) {
G
Glauber Costa 已提交
440
			mem_cgroup_get(memcg);
441
			sk->sk_cgrp = cg_proto;
G
Glauber Costa 已提交
442 443 444 445 446 447 448 449
		}
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(sock_update_memcg);

void sock_release_memcg(struct sock *sk)
{
450
	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
G
Glauber Costa 已提交
451 452 453 454 455 456
		struct mem_cgroup *memcg;
		WARN_ON(!sk->sk_cgrp->memcg);
		memcg = sk->sk_cgrp->memcg;
		mem_cgroup_put(memcg);
	}
}
G
Glauber Costa 已提交
457

458
#ifdef CONFIG_INET
G
Glauber Costa 已提交
459 460 461 462 463 464 465 466
struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg || mem_cgroup_is_root(memcg))
		return NULL;

	return &memcg->tcp_mem.cg_proto;
}
EXPORT_SYMBOL(tcp_proto_cgroup);
G
Glauber Costa 已提交
467
#endif /* CONFIG_INET */
A
Andrew Morton 已提交
468
#endif /* CONFIG_MEMCG_KMEM */
G
Glauber Costa 已提交
469

A
Andrew Morton 已提交
470
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
471 472 473 474 475 476 477 478 479 480 481 482
static void disarm_sock_keys(struct mem_cgroup *memcg)
{
	if (!memcg_proto_activated(&memcg->tcp_mem.cg_proto))
		return;
	static_key_slow_dec(&memcg_socket_limit_enabled);
}
#else
static void disarm_sock_keys(struct mem_cgroup *memcg)
{
}
#endif

483
static void drain_all_stock_async(struct mem_cgroup *memcg);
484

485
static struct mem_cgroup_per_zone *
486
mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid)
487
{
488
	return &memcg->info.nodeinfo[nid]->zoneinfo[zid];
489 490
}

491
struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
492
{
493
	return &memcg->css;
494 495
}

496
static struct mem_cgroup_per_zone *
497
page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page)
498
{
499 500
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);
501

502
	return mem_cgroup_zoneinfo(memcg, nid, zid);
503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_node_zone(int nid, int zid)
{
	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

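/*
 * Insert @mz into the soft-limit RB-tree of its zone, keyed by how far the
 * group's usage exceeds its soft limit (usage_in_excess). Ties go to the
 * right, so the rightmost node is always the worst offender.
 */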
static void
521
__mem_cgroup_insert_exceeded(struct mem_cgroup *memcg,
522
				struct mem_cgroup_per_zone *mz,
523 524
				struct mem_cgroup_tree_per_zone *mctz,
				unsigned long long new_usage_in_excess)
525 526 527 528 529 530 531 532
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_zone *mz_node;

	if (mz->on_tree)
		return;

533 534 535
	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess)
			p = &(*p)->rb_left;
		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}
	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
552 553 554
}

static void
555
__mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
556 557 558 559 560 561 562 563 564
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz)
{
	if (!mz->on_tree)
		return;
	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

565
static void
566
mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
567 568 569 570
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz)
{
	spin_lock(&mctz->lock);
571
	__mem_cgroup_remove_exceeded(memcg, mz, mctz);
572 573 574 575
	spin_unlock(&mctz->lock);
}


576
static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
577
{
578
	unsigned long long excess;
579 580
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;
581 582
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);
583 584 585
	mctz = soft_limit_tree_from_page(page);

	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
589 590 591
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_zoneinfo(memcg, nid, zid);
		excess = res_counter_soft_limit_excess(&memcg->res);
592 593 594 595
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
596
		if (excess || mz->on_tree) {
597 598 599
			spin_lock(&mctz->lock);
			/* if on-tree, remove it */
			if (mz->on_tree)
600
				__mem_cgroup_remove_exceeded(memcg, mz, mctz);
601
			/*
602 603
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
604
			 */
605
			__mem_cgroup_insert_exceeded(memcg, mz, mctz, excess);
606 607
			spin_unlock(&mctz->lock);
		}
608 609 610
	}
}

611
static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
612 613 614 615 616
{
	int node, zone;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;

B
Bob Liu 已提交
617
	for_each_node(node) {
618
		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
619
			mz = mem_cgroup_zoneinfo(memcg, node, zone);
620
			mctz = soft_limit_tree_node_zone(node, zone);
621
			mem_cgroup_remove_exceeded(memcg, mz, mctz);
622 623 624 625
		}
	}
}

626 627 628 629
static struct mem_cgroup_per_zone *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct rb_node *rightmost = NULL;
630
	struct mem_cgroup_per_zone *mz;
631 632

retry:
633
	mz = NULL;
634 635 636 637 638 639 640 641 642 643
	rightmost = rb_last(&mctz->rb_root);
	if (!rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
	/*
	 * Remove the node now but someone else can add it back,
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
644 645 646
	__mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
	if (!res_counter_soft_limit_excess(&mz->memcg->res) ||
		!css_tryget(&mz->memcg->css))
647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_zone *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct mem_cgroup_per_zone *mz;

	spin_lock(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock(&mctz->lock);
	return mz;
}

/*
 * Implementation Note: reading percpu statistics for memcg.
 *
 * Both vmstat[] and percpu_counter have thresholds and do periodic
 * synchronization to implement a "quick" read. There is a trade-off between
 * reading cost and precision of the value. We may have a chance to implement
 * periodic synchronization of the counters in memcg as well.
 *
 * But this _read() function is used for the user interface now. The user
 * accounts memory usage by memory cgroup and _always_ requires an exact
 * value because he accounts memory. Even if we provide a quick-and-fuzzy
 * read, we always have to visit all online cpus and make the sum. So, for
 * now, unnecessary synchronization is not implemented. (it is just
 * implemented for cpu hotplug)
 *
 * If there are kernel internal actions which can make use of some not-exact
 * value, and reading all cpu values can be a performance bottleneck in some
 * common workload, a threshold and synchronization as in vmstat[] should be
 * implemented.
 */
682
static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
683
				 enum mem_cgroup_stat_index idx)
684
{
685
	long val = 0;
686 687
	int cpu;

688 689
	get_online_cpus();
	for_each_online_cpu(cpu)
690
		val += per_cpu(memcg->stat->count[idx], cpu);
691
#ifdef CONFIG_HOTPLUG_CPU
692 693 694
	spin_lock(&memcg->pcp_counter_lock);
	val += memcg->nocpu_base.count[idx];
	spin_unlock(&memcg->pcp_counter_lock);
695 696
#endif
	put_online_cpus();
697 698 699
	return val;
}

700
static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
701 702 703
					 bool charge)
{
	int val = (charge) ? 1 : -1;
704
	this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
705 706
}

707
static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
708 709 710 711 712 713
					    enum mem_cgroup_events_index idx)
{
	unsigned long val = 0;
	int cpu;

	for_each_online_cpu(cpu)
714
		val += per_cpu(memcg->stat->events[idx], cpu);
715
#ifdef CONFIG_HOTPLUG_CPU
716 717 718
	spin_lock(&memcg->pcp_counter_lock);
	val += memcg->nocpu_base.events[idx];
	spin_unlock(&memcg->pcp_counter_lock);
719 720 721 722
#endif
	return val;
}

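/*
 * Update the per-cpu counters for a charge or uncharge of @nr_pages pages
 * (negative when uncharging) and credit the page-event counter that drives
 * the periodic threshold/softlimit/numainfo checks.
 */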
723
static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
724
					 bool anon, int nr_pages)
725
{
726 727
	preempt_disable();

728 729 730 731 732 733
	/*
	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
	 * counted as CACHE even if it's on ANON LRU.
	 */
	if (anon)
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
734
				nr_pages);
735
	else
736
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
737
				nr_pages);
738

739 740
	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
741
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
742
	else {
743
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
744 745
		nr_pages = -nr_pages; /* for event */
	}
746

747
	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
748

749
	preempt_enable();
750 751
}

752
unsigned long
753
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
754 755 756 757 758 759 760 761
{
	struct mem_cgroup_per_zone *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
	return mz->lru_size[lru];
}

static unsigned long
762
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
763
			unsigned int lru_mask)
764 765
{
	struct mem_cgroup_per_zone *mz;
H
Hugh Dickins 已提交
766
	enum lru_list lru;
767 768
	unsigned long ret = 0;

769
	mz = mem_cgroup_zoneinfo(memcg, nid, zid);
770

H
Hugh Dickins 已提交
771 772 773
	for_each_lru(lru) {
		if (BIT(lru) & lru_mask)
			ret += mz->lru_size[lru];
774 775 776 777 778
	}
	return ret;
}

static unsigned long
779
mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
780 781
			int nid, unsigned int lru_mask)
{
782 783 784
	u64 total = 0;
	int zid;

785
	for (zid = 0; zid < MAX_NR_ZONES; zid++)
786 787
		total += mem_cgroup_zone_nr_lru_pages(memcg,
						nid, zid, lru_mask);
788

789 790
	return total;
}
791

792
static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
793
			unsigned int lru_mask)
794
{
795
	int nid;
796 797
	u64 total = 0;

798
	for_each_node_state(nid, N_HIGH_MEMORY)
799
		total += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
800
	return total;
801 802
}

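/*
 * Returns true when the page-event counter has passed the next firing point
 * for @target, and re-arms the target for the following interval.
 */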
803 804
static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
805 806 807
{
	unsigned long val, next;

808
	val = __this_cpu_read(memcg->stat->nr_page_events);
809
	next = __this_cpu_read(memcg->stat->targets[target]);
810
	/* from time_after() in jiffies.h */
811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826
	if ((long)next - (long)val < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_NUMAINFO:
			next = val + NUMAINFO_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->stat->targets[target], next);
		return true;
827
	}
828
	return false;
829 830 831 832 833 834
}

/*
 * Check events in order.
 *
 */
835
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
836
{
837
	preempt_disable();
838
	/* threshold event is triggered in finer grain than soft limit */
839 840
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
841 842
		bool do_softlimit;
		bool do_numainfo __maybe_unused;
843 844 845 846 847 848 849 850 851

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
#if MAX_NUMNODES > 1
		do_numainfo = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_NUMAINFO);
#endif
		preempt_enable();

852
		mem_cgroup_threshold(memcg);
853
		if (unlikely(do_softlimit))
854
			mem_cgroup_update_tree(memcg, page);
855
#if MAX_NUMNODES > 1
856
		if (unlikely(do_numainfo))
857
			atomic_inc(&memcg->numainfo_events);
858
#endif
859 860
	} else
		preempt_enable();
861 862
}

G
Glauber Costa 已提交
863
struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
B
Balbir Singh 已提交
864 865 866 867 868 869
{
	return container_of(cgroup_subsys_state(cont,
				mem_cgroup_subsys_id), struct mem_cgroup,
				css);
}

870
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
871
{
872 873 874 875 876 877 878 879
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

880 881 882 883
	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
				struct mem_cgroup, css);
}

884
struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
885
{
886
	struct mem_cgroup *memcg = NULL;
887 888 889

	if (!mm)
		return NULL;
890 891 892 893 894 895 896
	/*
	 * Because we have no locks, mm->owner may be being moved to another
	 * cgroup. We use css_tryget() here even if this looks
	 * pessimistic (rather than adding locks here).
	 */
	rcu_read_lock();
	do {
897 898
		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
		if (unlikely(!memcg))
899
			break;
900
	} while (!css_tryget(&memcg->css));
901
	rcu_read_unlock();
902
	return memcg;
903 904
}

905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924
/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a zone and a priority level in @reclaim to
 * divide up the memcgs in the hierarchy among all concurrent
 * reclaimers operating on the same zone and priority.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
K
KAMEZAWA Hiroyuki 已提交
925
{
926 927
	struct mem_cgroup *memcg = NULL;
	int id = 0;
928

929 930 931
	if (mem_cgroup_disabled())
		return NULL;

932 933
	if (!root)
		root = root_mem_cgroup;
K
KAMEZAWA Hiroyuki 已提交
934

935 936
	if (prev && !reclaim)
		id = css_id(&prev->css);
K
KAMEZAWA Hiroyuki 已提交
937

938 939
	if (prev && prev != root)
		css_put(&prev->css);
K
KAMEZAWA Hiroyuki 已提交
940

941 942 943 944 945
	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
			return NULL;
		return root;
	}
K
KAMEZAWA Hiroyuki 已提交
946

947
	while (!memcg) {
948
		struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
949
		struct cgroup_subsys_state *css;
950

951 952 953 954 955 956 957 958 959 960 961
		if (reclaim) {
			int nid = zone_to_nid(reclaim->zone);
			int zid = zone_idx(reclaim->zone);
			struct mem_cgroup_per_zone *mz;

			mz = mem_cgroup_zoneinfo(root, nid, zid);
			iter = &mz->reclaim_iter[reclaim->priority];
			if (prev && reclaim->generation != iter->generation)
				return NULL;
			id = iter->position;
		}
K
KAMEZAWA Hiroyuki 已提交
962

963 964 965 966 967 968 969 970
		rcu_read_lock();
		css = css_get_next(&mem_cgroup_subsys, id + 1, &root->css, &id);
		if (css) {
			if (css == &root->css || css_tryget(css))
				memcg = container_of(css,
						     struct mem_cgroup, css);
		} else
			id = 0;
K
KAMEZAWA Hiroyuki 已提交
971 972
		rcu_read_unlock();

973 974 975 976 977 978 979
		if (reclaim) {
			iter->position = id;
			if (!css)
				iter->generation++;
			else if (!prev && memcg)
				reclaim->generation = iter->generation;
		}
980 981 982 983 984

		if (prev && !css)
			return NULL;
	}
	return memcg;
K
KAMEZAWA Hiroyuki 已提交
985
}
K
KAMEZAWA Hiroyuki 已提交
986

987 988 989 990 991 992 993
/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
994 995 996 997 998 999
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}
K
KAMEZAWA Hiroyuki 已提交
1000

1001 1002 1003 1004 1005 1006
/*
 * Iteration constructs for visiting all cgroups (under a tree).  If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))
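/*
 * Typical usage (sketch):
 *
 *	for_each_mem_cgroup_tree(iter, memcg)
 *		iter->oom_lock = false;
 *
 * Exiting such a loop early requires mem_cgroup_iter_break() to drop the
 * reference held on the current iterator position.
 */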
K
KAMEZAWA Hiroyuki 已提交
1015

1016
static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
1017
{
1018
	return (memcg == root_mem_cgroup);
1019 1020
}

1021 1022
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
1023
	struct mem_cgroup *memcg;
1024 1025 1026 1027 1028

	if (!mm)
		return;

	rcu_read_lock();
1029 1030
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (unlikely(!memcg))
1031 1032 1033 1034
		goto out;

	switch (idx) {
	case PGFAULT:
1035 1036 1037 1038
		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
		break;
	case PGMAJFAULT:
		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
1039 1040 1041 1042 1043 1044 1045 1046 1047
		break;
	default:
		BUG();
	}
out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(mem_cgroup_count_vm_event);

1048 1049 1050
/**
 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
 * @zone: zone of the wanted lruvec
1051
 * @memcg: memcg of the wanted lruvec
1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068
 *
 * Returns the lru list vector holding pages for the given @zone and
 * @memcg. This can be the global zone lruvec, if the memory controller
 * is disabled.
 */
struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
				      struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_disabled())
		return &zone->lruvec;

	mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone));
	return &mz->lruvec;
}

K
KAMEZAWA Hiroyuki 已提交
1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081
/*
 * The following LRU functions are allowed to be used without PCG_LOCK.
 * Operations are called by the global LRU code independently from memcg.
 * What we have to take care of here is the validity of pc->mem_cgroup.
 *
 * Changes to pc->mem_cgroup happens when
 * 1. charge
 * 2. moving account
 * In typical case, "charge" is done before add-to-lru. Exception is SwapCache.
 * It is added to LRU before charge.
 * If PCG_USED bit is not set, page_cgroup is not added to this private LRU.
 * When moving account, the page is not on LRU. It's isolated.
 */
1082

1083
/**
1084
 * mem_cgroup_page_lruvec - return lruvec for adding an lru page
1085
 * @page: the page
1086
 * @zone: zone of the page
1087
 */
1088
struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
K
KAMEZAWA Hiroyuki 已提交
1089 1090
{
	struct mem_cgroup_per_zone *mz;
1091 1092
	struct mem_cgroup *memcg;
	struct page_cgroup *pc;
1093

1094
	if (mem_cgroup_disabled())
1095 1096
		return &zone->lruvec;

K
KAMEZAWA Hiroyuki 已提交
1097
	pc = lookup_page_cgroup(page);
1098
	memcg = pc->mem_cgroup;
1099 1100

	/*
1101
	 * Surreptitiously switch any uncharged offlist page to root:
1102 1103 1104 1105 1106 1107 1108
	 * an uncharged page off lru does nothing to secure
	 * its former mem_cgroup from sudden removal.
	 *
	 * Our caller holds lru_lock, and PageCgroupUsed is updated
	 * under page_cgroup lock: between them, they make all uses
	 * of pc->mem_cgroup safe.
	 */
1109
	if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup)
1110 1111
		pc->mem_cgroup = memcg = root_mem_cgroup;

1112 1113
	mz = page_cgroup_zoneinfo(memcg, page);
	return &mz->lruvec;
K
KAMEZAWA Hiroyuki 已提交
1114
}
1115

1116
/**
1117 1118 1119 1120
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @nr_pages: positive when adding or negative when removing
1121
 *
1122 1123
 * This function must be called when a page is added to or removed from an
 * lru list.
1124
 */
1125 1126
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int nr_pages)
1127 1128
{
	struct mem_cgroup_per_zone *mz;
1129
	unsigned long *lru_size;
1130 1131 1132 1133

	if (mem_cgroup_disabled())
		return;

1134 1135 1136 1137
	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
	lru_size = mz->lru_size + lru;
	*lru_size += nr_pages;
	VM_BUG_ON((long)(*lru_size) < 0);
K
KAMEZAWA Hiroyuki 已提交
1138
}
1139

1140
/*
1141
 * Checks whether given mem is same or in the root_mem_cgroup's
1142 1143
 * hierarchy subtree
 */
1144 1145
bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
				  struct mem_cgroup *memcg)
1146
{
1147 1148
	if (root_memcg == memcg)
		return true;
1149
	if (!root_memcg->use_hierarchy || !memcg)
1150
		return false;
1151 1152 1153 1154 1155 1156 1157 1158
	return css_is_ancestor(&memcg->css, &root_memcg->css);
}

static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
				       struct mem_cgroup *memcg)
{
	bool ret;

1159
	rcu_read_lock();
1160
	ret = __mem_cgroup_same_or_subtree(root_memcg, memcg);
1161 1162
	rcu_read_unlock();
	return ret;
1163 1164
}

1165
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg)
1166 1167
{
	int ret;
1168
	struct mem_cgroup *curr = NULL;
1169
	struct task_struct *p;
1170

1171
	p = find_lock_task_mm(task);
1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186
	if (p) {
		curr = try_get_mem_cgroup_from_mm(p->mm);
		task_unlock(p);
	} else {
		/*
		 * All threads may have already detached their mm's, but the oom
		 * killer still needs to detect if they have already been oom
		 * killed to prevent needlessly killing additional tasks.
		 */
		task_lock(task);
		curr = mem_cgroup_from_task(task);
		if (curr)
			css_get(&curr->css);
		task_unlock(task);
	}
1187 1188
	if (!curr)
		return 0;
1189
	/*
	 * We should check use_hierarchy of "memcg" not "curr". Because checking
	 * use_hierarchy of "curr" here would make this function return true if
	 * hierarchy is enabled in "curr" and "curr" is a child of "memcg" in the
	 * *cgroup* hierarchy (even if use_hierarchy is disabled in "memcg").
	 */
1195
	ret = mem_cgroup_same_or_subtree(memcg, curr);
1196
	css_put(&curr->css);
1197 1198 1199
	return ret;
}

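/*
 * Inactive-anon "low" heuristic: the acceptable inactive:active ratio grows
 * as sqrt(10 * size-in-GB), so e.g. a 10GB anon LRU is only considered low
 * once the active list outweighs the inactive list by more than ~10x.
 */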
1200
int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
1201
{
1202
	unsigned long inactive_ratio;
1203
	unsigned long inactive;
1204
	unsigned long active;
1205
	unsigned long gb;
1206

1207 1208
	inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
	active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);
1209

1210 1211 1212 1213 1214 1215
	gb = (inactive + active) >> (30 - PAGE_SHIFT);
	if (gb)
		inactive_ratio = int_sqrt(10 * gb);
	else
		inactive_ratio = 1;

1216
	return inactive * inactive_ratio < active;
1217 1218
}

1219
int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec)
1220 1221 1222 1223
{
	unsigned long active;
	unsigned long inactive;

1224 1225
	inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_FILE);
	active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_FILE);
1226 1227 1228 1229

	return (active > inactive);
}

1230 1231 1232
#define mem_cgroup_from_res_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @memcg can be charged with, in
 * pages.
 */
1240
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1241
{
1242 1243
	unsigned long long margin;

1244
	margin = res_counter_margin(&memcg->res);
1245
	if (do_swap_account)
1246
		margin = min(margin, res_counter_margin(&memcg->memsw));
1247
	return margin >> PAGE_SHIFT;
1248 1249
}

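/*
 * Effective swappiness for @memcg: the root cgroup falls back to the global
 * vm_swappiness, every other cgroup uses its own setting.
 */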
1250
int mem_cgroup_swappiness(struct mem_cgroup *memcg)
K
KOSAKI Motohiro 已提交
1251 1252 1253 1254 1255 1256 1257
{
	struct cgroup *cgrp = memcg->css.cgroup;

	/* root ? */
	if (cgrp->parent == NULL)
		return vm_swappiness;

1258
	return memcg->swappiness;
K
KOSAKI Motohiro 已提交
1259 1260
}

1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274
/*
 * memcg->moving_account is used for checking possibility that some thread is
 * calling move_account(). When a thread on CPU-A starts moving pages under
 * a memcg, other threads should check memcg->moving_account under
 * rcu_read_lock(), like this:
 *
 *         CPU-A                                    CPU-B
 *                                              rcu_read_lock()
 *         memcg->moving_account+1              if (memcg->moving_account)
 *                                                   take heavy locks.
 *         synchronize_rcu()                    update something.
 *                                              rcu_read_unlock()
 *         start move here.
 */
1275 1276 1277 1278

/* for quick checking without looking up memcg */
atomic_t memcg_moving __read_mostly;

1279
static void mem_cgroup_start_move(struct mem_cgroup *memcg)
1280
{
1281
	atomic_inc(&memcg_moving);
1282
	atomic_inc(&memcg->moving_account);
1283 1284 1285
	synchronize_rcu();
}

1286
static void mem_cgroup_end_move(struct mem_cgroup *memcg)
1287
{
1288 1289 1290 1291
	/*
	 * Now, mem_cgroup_clear_mc() may call this function with NULL.
	 * We check NULL in callee rather than caller.
	 */
1292 1293
	if (memcg) {
		atomic_dec(&memcg_moving);
1294
		atomic_dec(&memcg->moving_account);
1295
	}
1296
}
1297

1298 1299 1300
/*
 * 2 routines for checking whether "mem" is under move_account() or not.
 *
 * mem_cgroup_stolen() -  checking whether a cgroup is mc.from or not. This
 *			  is used for avoiding races in accounting.  If true,
 *			  pc->mem_cgroup may be overwritten.
 *
 * mem_cgroup_under_move() - checking whether a cgroup is mc.from or mc.to or
 *			  under the hierarchy of moving cgroups. This is for
 *			  waiting at high memory pressure caused by "move".
 */

1310
static bool mem_cgroup_stolen(struct mem_cgroup *memcg)
1311 1312
{
	VM_BUG_ON(!rcu_read_lock_held());
1313
	return atomic_read(&memcg->moving_account) > 0;
1314
}
1315

1316
static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1317
{
1318 1319
	struct mem_cgroup *from;
	struct mem_cgroup *to;
1320
	bool ret = false;
1321 1322 1323 1324 1325 1326 1327 1328 1329
	/*
	 * Unlike task_move routines, we access mc.to, mc.from not under
	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;
1330

1331 1332
	ret = mem_cgroup_same_or_subtree(memcg, from)
		|| mem_cgroup_same_or_subtree(memcg, to);
1333 1334
unlock:
	spin_unlock(&mc.lock);
1335 1336 1337
	return ret;
}

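/*
 * If a charge-moving task is currently operating on @memcg's hierarchy,
 * sleep until it finishes so accounting does not race with the move.
 * Returns true if we waited.
 */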
1338
static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1339 1340
{
	if (mc.moving_task && current != mc.moving_task) {
1341
		if (mem_cgroup_under_move(memcg)) {
1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}

1354 1355 1356 1357
/*
 * Take this lock when
 * - a code tries to modify page's memcg while it's USED.
 * - a code tries to modify page state accounting in a memcg.
1358
 * see mem_cgroup_stolen(), too.
1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371
 */
static void move_lock_mem_cgroup(struct mem_cgroup *memcg,
				  unsigned long *flags)
{
	spin_lock_irqsave(&memcg->move_lock, *flags);
}

static void move_unlock_mem_cgroup(struct mem_cgroup *memcg,
				unsigned long *flags)
{
	spin_unlock_irqrestore(&memcg->move_lock, *flags);
}

1372
/**
1373
 * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
	struct cgroup *task_cgrp;
	struct cgroup *mem_cgrp;
	/*
	 * Need a buffer in BSS, can't rely on allocations. The code relies
	 * on the assumption that OOM is serialized for memory controller.
	 * If this assumption is broken, revisit this code.
	 */
	static char memcg_name[PATH_MAX];
	int ret;

1392
	if (!memcg || !p)
1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437
		return;

	rcu_read_lock();

	mem_cgrp = memcg->css.cgroup;
	task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);

	ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
	if (ret < 0) {
		/*
		 * Unfortunately, we are unable to convert to a useful name
		 * But we'll still print out the usage information
		 */
		rcu_read_unlock();
		goto done;
	}
	rcu_read_unlock();

	printk(KERN_INFO "Task in %s killed", memcg_name);

	rcu_read_lock();
	ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
	if (ret < 0) {
		rcu_read_unlock();
		goto done;
	}
	rcu_read_unlock();

	/*
	 * Continues from above, so we don't need a KERN_ level
	 */
	printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
done:

	printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
		res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
		res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
		res_counter_read_u64(&memcg->res, RES_FAILCNT));
	printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
		"failcnt %llu\n",
		res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
		res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
		res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
}

1438 1439 1440 1441
/*
 * This function returns the number of memcgs under the hierarchy tree.
 * Returns 1 (self count) if there are no children.
 */
1442
static int mem_cgroup_count_children(struct mem_cgroup *memcg)
1443 1444
{
	int num = 0;
K
KAMEZAWA Hiroyuki 已提交
1445 1446
	struct mem_cgroup *iter;

1447
	for_each_mem_cgroup_tree(iter, memcg)
K
KAMEZAWA Hiroyuki 已提交
1448
		num++;
1449 1450 1451
	return num;
}

D
David Rientjes 已提交
1452 1453 1454
/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
1455
static u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
D
David Rientjes 已提交
1456 1457 1458 1459
{
	u64 limit;
	u64 memsw;

1460 1461 1462
	limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
	limit += total_swap_pages << PAGE_SHIFT;

D
David Rientjes 已提交
1463 1464 1465 1466 1467 1468 1469 1470
	memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
	/*
	 * If memsw is finite and limits the amount of swap space available
	 * to this memcg, return that limit.
	 */
	return min(limit, memsw);
}

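/*
 * Memcg-aware OOM killer: walk every task in every cgroup of @memcg's
 * hierarchy, score them with oom_badness() against the memcg limit, and
 * kill the highest-scoring task.
 */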
1471 1472
void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
			      int order)
1473 1474 1475 1476 1477 1478 1479
{
	struct mem_cgroup *iter;
	unsigned long chosen_points = 0;
	unsigned long totalpages;
	unsigned int points = 0;
	struct task_struct *chosen = NULL;

1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490
	/*
	 * If current has a pending SIGKILL, then automatically select it.  The
	 * goal is to allow it to allocate so that it may quickly exit and free
	 * its memory.
	 */
	if (fatal_signal_pending(current)) {
		set_thread_flag(TIF_MEMDIE);
		return;
	}

	check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537
	totalpages = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1;
	for_each_mem_cgroup_tree(iter, memcg) {
		struct cgroup *cgroup = iter->css.cgroup;
		struct cgroup_iter it;
		struct task_struct *task;

		cgroup_iter_start(cgroup, &it);
		while ((task = cgroup_iter_next(cgroup, &it))) {
			switch (oom_scan_process_thread(task, totalpages, NULL,
							false)) {
			case OOM_SCAN_SELECT:
				if (chosen)
					put_task_struct(chosen);
				chosen = task;
				chosen_points = ULONG_MAX;
				get_task_struct(chosen);
				/* fall through */
			case OOM_SCAN_CONTINUE:
				continue;
			case OOM_SCAN_ABORT:
				cgroup_iter_end(cgroup, &it);
				mem_cgroup_iter_break(memcg, iter);
				if (chosen)
					put_task_struct(chosen);
				return;
			case OOM_SCAN_OK:
				break;
			};
			points = oom_badness(task, memcg, NULL, totalpages);
			if (points > chosen_points) {
				if (chosen)
					put_task_struct(chosen);
				chosen = task;
				chosen_points = points;
				get_task_struct(chosen);
			}
		}
		cgroup_iter_end(cgroup, &it);
	}

	if (!chosen)
		return;
	points = chosen_points * 1000 / totalpages;
	oom_kill_process(chosen, gfp_mask, order, points, totalpages, memcg,
			 NULL, "Memory cgroup out of memory");
}

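/*
 * Reclaim pages charged to @memcg's hierarchy, retrying up to
 * MEM_CGROUP_MAX_RECLAIM_LOOPS times until some margin opens up, the
 * caller-requested minimal progress is made (MEM_CGROUP_RECLAIM_SHRINK),
 * or it becomes clear that nothing is reclaimable.
 * Returns the number of pages reclaimed.
 */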
1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573
static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg,
					gfp_t gfp_mask,
					unsigned long flags)
{
	unsigned long total = 0;
	bool noswap = false;
	int loop;

	if (flags & MEM_CGROUP_RECLAIM_NOSWAP)
		noswap = true;
	if (!(flags & MEM_CGROUP_RECLAIM_SHRINK) && memcg->memsw_is_minimum)
		noswap = true;

	for (loop = 0; loop < MEM_CGROUP_MAX_RECLAIM_LOOPS; loop++) {
		if (loop)
			drain_all_stock_async(memcg);
		total += try_to_free_mem_cgroup_pages(memcg, gfp_mask, noswap);
		/*
		 * Allow limit shrinkers, which are triggered directly
		 * by userspace, to catch signals and stop reclaim
		 * after minimal progress, regardless of the margin.
		 */
		if (total && (flags & MEM_CGROUP_RECLAIM_SHRINK))
			break;
		if (mem_cgroup_margin(memcg))
			break;
		/*
		 * If nothing was reclaimed after two attempts, there
		 * may be no reclaimable pages in this hierarchy.
		 */
		if (loop && !total)
			break;
	}
	return total;
}

1574 1575
/**
 * test_mem_cgroup_node_reclaimable
 * @memcg: the target memcg
 * @nid: the node ID to be checked.
 * @noswap : specify true here if the user wants file only information.
 *
 * This function returns whether the specified memcg contains any
 * reclaimable pages on a node. Returns true if there are any reclaimable
 * pages in the node.
 */
1584
static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
1585 1586
		int nid, bool noswap)
{
1587
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
1588 1589 1590
		return true;
	if (noswap || !total_swap_pages)
		return false;
1591
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
1592 1593 1594 1595
		return true;
	return false;

}
1596 1597 1598 1599 1600 1601 1602 1603
#if MAX_NUMNODES > 1

/*
 * Always updating the nodemask is not very good - even if we have an empty
 * list or the wrong list here, we can start from some node and traverse all
 * nodes based on the zonelist. So update the list loosely once per 10 secs.
 *
 */
1604
static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
1605 1606
{
	int nid;
1607 1608 1609 1610
	/*
	 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
	 * pagein/pageout changes since the last update.
	 */
1611
	if (!atomic_read(&memcg->numainfo_events))
1612
		return;
1613
	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
1614 1615 1616
		return;

	/* make a nodemask where this memcg uses memory from */
1617
	memcg->scan_nodes = node_states[N_HIGH_MEMORY];
1618 1619 1620

	for_each_node_mask(nid, node_states[N_HIGH_MEMORY]) {

1621 1622
		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
			node_clear(nid, memcg->scan_nodes);
1623
	}
1624

1625 1626
	atomic_set(&memcg->numainfo_events, 0);
	atomic_set(&memcg->numainfo_updating, 0);
1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640
}

/*
 * Selecting a node where we start reclaim from. Because what we need is just
 * reducing the usage counter, starting from anywhere is OK. Considering
 * memory reclaim from the current node, there are pros and cons.
 *
 * Freeing memory from current node means freeing memory from a node which
 * we'll use or we've used. So, it may make LRU bad. And if several threads
 * hit limits, it will see a contention on a node. But freeing from remote
 * node means more costs for memory reclaim because of memory latency.
 *
 * Now, we use round-robin. Better algorithm is welcomed.
 */
1641
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1642 1643 1644
{
	int node;

1645 1646
	mem_cgroup_may_update_nodemask(memcg);
	node = memcg->last_scanned_node;
1647

1648
	node = next_node(node, memcg->scan_nodes);
1649
	if (node == MAX_NUMNODES)
1650
		node = first_node(memcg->scan_nodes);
1651 1652 1653 1654 1655 1656 1657 1658 1659
	/*
	 * We call this when we hit the limit, not when pages are added to LRU.
	 * No LRU may hold pages because all pages are UNEVICTABLE or
	 * memcg is too small and all pages are not on LRU. In that case,
	 * we use the current node.
	 */
	if (unlikely(node == MAX_NUMNODES))
		node = numa_node_id();

1660
	memcg->last_scanned_node = node;
1661 1662 1663
	return node;
}

1664 1665 1666 1667 1668 1669
/*
 * Check all nodes whether they contain reclaimable pages or not.
 * For a quick scan, we make use of scan_nodes. This will allow us to skip
 * unused nodes. But scan_nodes is lazily updated and may not contain
 * enough new information. We need to double check.
 */
1670
static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
1671 1672 1673 1674 1675 1676 1677
{
	int nid;

	/*
	 * quick check...making use of scan_node.
	 * We can skip unused nodes.
	 */
1678 1679
	if (!nodes_empty(memcg->scan_nodes)) {
		for (nid = first_node(memcg->scan_nodes);
1680
		     nid < MAX_NUMNODES;
1681
		     nid = next_node(nid, memcg->scan_nodes)) {
1682

1683
			if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
1684 1685 1686 1687 1688 1689 1690
				return true;
		}
	}
	/*
	 * Check rest of nodes.
	 */
	for_each_node_state(nid, N_HIGH_MEMORY) {
1691
		if (node_isset(nid, memcg->scan_nodes))
1692
			continue;
1693
		if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
1694 1695 1696 1697 1698
			return true;
	}
	return false;
}

1699
#else
1700
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1701 1702 1703
{
	return 0;
}
1704

1705
static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
1706
{
1707
	return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
1708
}
1709 1710
#endif

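/*
 * Soft-limit reclaim: walk the hierarchy below @root_memcg and shrink its
 * members one at a time until @root_memcg is back under its soft limit or
 * the loop limit is reached. Returns the number of pages reclaimed.
 */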
1711 1712 1713 1714
static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
				   struct zone *zone,
				   gfp_t gfp_mask,
				   unsigned long *total_scanned)
1715
{
1716
	struct mem_cgroup *victim = NULL;
1717
	int total = 0;
K
KAMEZAWA Hiroyuki 已提交
1718
	int loop = 0;
1719
	unsigned long excess;
1720
	unsigned long nr_scanned;
1721 1722 1723 1724
	struct mem_cgroup_reclaim_cookie reclaim = {
		.zone = zone,
		.priority = 0,
	};
1725

1726
	excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT;
K
KAMEZAWA Hiroyuki 已提交
1727

1728
	while (1) {
1729
		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1730
		if (!victim) {
K
KAMEZAWA Hiroyuki 已提交
1731
			loop++;
1732 1733 1734 1735 1736 1737
			if (loop >= 2) {
				/*
				 * If we have not been able to reclaim
				 * anything, it might because there are
				 * no reclaimable pages under this hierarchy
				 */
1738
				if (!total)
1739 1740
					break;
				/*
L
				 * We want to do more targeted reclaim.
				 * excess >> 2 is not too excessive so as to
				 * reclaim too much, nor too little so that we keep
				 * coming back to reclaim from this cgroup
				if (total >= (excess >> 2) ||
1747
					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1748 1749
					break;
			}
1750
			continue;
1751
		}
1752
		if (!mem_cgroup_reclaimable(victim, false))
1753
			continue;
1754 1755 1756 1757
		total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
						     zone, &nr_scanned);
		*total_scanned += nr_scanned;
		if (!res_counter_soft_limit_excess(&root_memcg->res))
1758
			break;
1759
	}
1760
	mem_cgroup_iter_break(root_memcg, victim);
K
KAMEZAWA Hiroyuki 已提交
1761
	return total;
1762 1763
}

K
KAMEZAWA Hiroyuki 已提交
1764 1765 1766
/*
 * Check whether the OOM killer is already running under our hierarchy.
 * If someone is running, return false.
 * Has to be called with memcg_oom_lock
 */
1769
static bool mem_cgroup_oom_lock(struct mem_cgroup *memcg)
K
KAMEZAWA Hiroyuki 已提交
1770
{
1771
	struct mem_cgroup *iter, *failed = NULL;
1772

1773
	for_each_mem_cgroup_tree(iter, memcg) {
1774
		if (iter->oom_lock) {
1775 1776 1777 1778 1779
			/*
			 * this subtree of our hierarchy is already locked
			 * so we cannot give a lock.
			 */
			failed = iter;
1780 1781
			mem_cgroup_iter_break(memcg, iter);
			break;
1782 1783
		} else
			iter->oom_lock = true;
K
KAMEZAWA Hiroyuki 已提交
1784
	}
K
KAMEZAWA Hiroyuki 已提交
1785

1786
	if (!failed)
1787
		return true;
1788 1789 1790 1791 1792

	/*
	 * OK, we failed to lock the whole subtree so we have to clean up
	 * what we set up to the failing subtree
	 */
1793
	for_each_mem_cgroup_tree(iter, memcg) {
1794
		if (iter == failed) {
1795 1796
			mem_cgroup_iter_break(memcg, iter);
			break;
1797 1798 1799
		}
		iter->oom_lock = false;
	}
1800
	return false;
1801
}
1802

1803
/*
1804
 * Has to be called with memcg_oom_lock
1805
 */
1806
static int mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1807
{
K
KAMEZAWA Hiroyuki 已提交
1808 1809
	struct mem_cgroup *iter;

1810
	for_each_mem_cgroup_tree(iter, memcg)
1811 1812 1813 1814
		iter->oom_lock = false;
	return 0;
}

1815
static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1816 1817 1818
{
	struct mem_cgroup *iter;

1819
	for_each_mem_cgroup_tree(iter, memcg)
1820 1821 1822
		atomic_inc(&iter->under_oom);
}

1823
static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1824 1825 1826
{
	struct mem_cgroup *iter;

K
KAMEZAWA Hiroyuki 已提交
1827 1828 1829 1830 1831
	/*
	 * When a new child is created while the hierarchy is under oom,
	 * mem_cgroup_oom_lock() may not be called. We have to use
	 * atomic_add_unless() here.
	 */
1832
	for_each_mem_cgroup_tree(iter, memcg)
1833
		atomic_add_unless(&iter->under_oom, -1, 0);
1834 1835
}

1836
static DEFINE_SPINLOCK(memcg_oom_lock);
K
KAMEZAWA Hiroyuki 已提交
1837 1838
static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);

K
KAMEZAWA Hiroyuki 已提交
1839
struct oom_wait_info {
1840
	struct mem_cgroup *memcg;
K
KAMEZAWA Hiroyuki 已提交
1841 1842 1843 1844 1845 1846
	wait_queue_t	wait;
};

static int memcg_oom_wake_function(wait_queue_t *wait,
	unsigned mode, int sync, void *arg)
{
1847 1848
	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
	struct mem_cgroup *oom_wait_memcg;
K
KAMEZAWA Hiroyuki 已提交
1849 1850 1851
	struct oom_wait_info *oom_wait_info;

	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1852
	oom_wait_memcg = oom_wait_info->memcg;
K
KAMEZAWA Hiroyuki 已提交
1853 1854

	/*
1855
	 * Both of oom_wait_info->memcg and wake_memcg are stable under us.
K
KAMEZAWA Hiroyuki 已提交
1856 1857
	 * Then we can use css_is_ancestor without taking care of RCU.
	 */
1858 1859
	if (!mem_cgroup_same_or_subtree(oom_wait_memcg, wake_memcg)
		&& !mem_cgroup_same_or_subtree(wake_memcg, oom_wait_memcg))
K
KAMEZAWA Hiroyuki 已提交
1860 1861 1862 1863
		return 0;
	return autoremove_wake_function(wait, mode, sync, arg);
}

1864
static void memcg_wakeup_oom(struct mem_cgroup *memcg)
K
KAMEZAWA Hiroyuki 已提交
1865
{
1866 1867
	/* for filtering, pass "memcg" as argument. */
	__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
K
KAMEZAWA Hiroyuki 已提交
1868 1869
}

1870
static void memcg_oom_recover(struct mem_cgroup *memcg)
1871
{
1872 1873
	if (memcg && atomic_read(&memcg->under_oom))
		memcg_wakeup_oom(memcg);
1874 1875
}

K
KAMEZAWA Hiroyuki 已提交
1876 1877 1878
/*
 * try to call OOM killer. returns false if we should exit memory-reclaim loop.
 */
1879 1880
static bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask,
				  int order)
1881
{
K
KAMEZAWA Hiroyuki 已提交
1882
	struct oom_wait_info owait;
1883
	bool locked, need_to_kill;
K
KAMEZAWA Hiroyuki 已提交
1884

1885
	owait.memcg = memcg;
K
KAMEZAWA Hiroyuki 已提交
1886 1887 1888 1889
	owait.wait.flags = 0;
	owait.wait.func = memcg_oom_wake_function;
	owait.wait.private = current;
	INIT_LIST_HEAD(&owait.wait.task_list);
1890
	need_to_kill = true;
1891
	mem_cgroup_mark_under_oom(memcg);
1892

1893
	/* At first, try to OOM lock hierarchy under memcg.*/
1894
	spin_lock(&memcg_oom_lock);
1895
	locked = mem_cgroup_oom_lock(memcg);
K
KAMEZAWA Hiroyuki 已提交
1896 1897 1898 1899 1900
	/*
	 * Even if signal_pending(), we can't quit charge() loop without
	 * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL
	 * under OOM is always welcomed, use TASK_KILLABLE here.
	 */
1901
	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1902
	if (!locked || memcg->oom_kill_disable)
1903 1904
		need_to_kill = false;
	if (locked)
1905
		mem_cgroup_oom_notify(memcg);
1906
	spin_unlock(&memcg_oom_lock);
K
KAMEZAWA Hiroyuki 已提交
1907

1908 1909
	if (need_to_kill) {
		finish_wait(&memcg_oom_waitq, &owait.wait);
1910
		mem_cgroup_out_of_memory(memcg, mask, order);
1911
	} else {
K
KAMEZAWA Hiroyuki 已提交
1912
		schedule();
K
KAMEZAWA Hiroyuki 已提交
1913
		finish_wait(&memcg_oom_waitq, &owait.wait);
K
KAMEZAWA Hiroyuki 已提交
1914
	}
1915
	spin_lock(&memcg_oom_lock);
1916
	if (locked)
1917 1918
		mem_cgroup_oom_unlock(memcg);
	memcg_wakeup_oom(memcg);
1919
	spin_unlock(&memcg_oom_lock);
K
KAMEZAWA Hiroyuki 已提交
1920

1921
	mem_cgroup_unmark_under_oom(memcg);
1922

K
KAMEZAWA Hiroyuki 已提交
1923 1924 1925
	if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
		return false;
	/* Give chance to dying process */
1926
	schedule_timeout_uninterruptible(1);
K
KAMEZAWA Hiroyuki 已提交
1927
	return true;
1928 1929
}

1930 1931 1932
/*
 * Currently used to update mapped file statistics, but the routine can be
 * generalized to update other statistics as well.
1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946 1947 1948 1949
 *
 * Notes: Race condition
 *
 * We usually use page_cgroup_lock() for accessing page_cgroup member but
 * it tends to be costly. But considering some conditions, we doesn't need
 * to do so _always_.
 *
 * Considering "charge", lock_page_cgroup() is not required because all
 * file-stat operations happen after a page is attached to radix-tree. There
 * are no race with "charge".
 *
 * Considering "uncharge", we know that memcg doesn't clear pc->mem_cgroup
 * at "uncharge" intentionally. So, we always see valid pc->mem_cgroup even
 * if there are race with "uncharge". Statistics itself is properly handled
 * by flags.
 *
 * Considering "move", this is an only case we see a race. To make the race
1950 1951
 * small, we check mm->moving_account and detect there are possibility of race
 * If there is, we take a lock.
1952
 */
1953

1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966
void __mem_cgroup_begin_update_page_stat(struct page *page,
				bool *locked, unsigned long *flags)
{
	struct mem_cgroup *memcg;
	struct page_cgroup *pc;

	pc = lookup_page_cgroup(page);
again:
	memcg = pc->mem_cgroup;
	if (unlikely(!memcg || !PageCgroupUsed(pc)))
		return;
	/*
	 * If this memory cgroup is not under account moving, we don't
1967
	 * need to take move_lock_mem_cgroup(). Because we already hold
1968
	 * rcu_read_lock(), any calls to move_account will be delayed until
1969
	 * rcu_read_unlock() if mem_cgroup_stolen() == true.
1970
	 */
1971
	if (!mem_cgroup_stolen(memcg))
1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988
		return;

	move_lock_mem_cgroup(memcg, flags);
	if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) {
		move_unlock_mem_cgroup(memcg, flags);
		goto again;
	}
	*locked = true;
}

void __mem_cgroup_end_update_page_stat(struct page *page, unsigned long *flags)
{
	struct page_cgroup *pc = lookup_page_cgroup(page);

	/*
	 * It's guaranteed that pc->mem_cgroup never changes while
	 * lock is held because a routine modifies pc->mem_cgroup
1989
	 * should take move_lock_mem_cgroup().
1990 1991 1992 1993
	 */
	move_unlock_mem_cgroup(pc->mem_cgroup, flags);
}

1994 1995
void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_page_stat_item idx, int val)
1996
{
1997
	struct mem_cgroup *memcg;
1998
	struct page_cgroup *pc = lookup_page_cgroup(page);
1999
	unsigned long uninitialized_var(flags);
2000

2001
	if (mem_cgroup_disabled())
2002
		return;
2003

2004 2005
	memcg = pc->mem_cgroup;
	if (unlikely(!memcg || !PageCgroupUsed(pc)))
2006
		return;
2007 2008

	switch (idx) {
2009 2010
	case MEMCG_NR_FILE_MAPPED:
		idx = MEM_CGROUP_STAT_FILE_MAPPED;
2011 2012 2013
		break;
	default:
		BUG();
2014
	}
2015

2016
	this_cpu_add(memcg->stat->count[idx], val);
2017
}
2018

2019 2020 2021 2022
/*
 * size of first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: maybe necessary to use big numbers in big irons.
 */
2023
#define CHARGE_BATCH	32U
2024 2025
struct memcg_stock_pcp {
	struct mem_cgroup *cached; /* this never be root cgroup */
2026
	unsigned int nr_pages;
2027
	struct work_struct work;
2028
	unsigned long flags;
2029
#define FLUSHING_CACHED_CHARGE	0
2030 2031
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
2032
static DEFINE_MUTEX(percpu_charge_mutex);
2033 2034

/*
2035
 * Try to consume stocked charge on this cpu. If success, one page is consumed
2036 2037 2038 2039
 * from local stock and true is returned. If the stock is 0 or charges from a
 * cgroup which is not current target, returns false. This stock will be
 * refilled.
 */
2040
static bool consume_stock(struct mem_cgroup *memcg)
2041 2042 2043 2044 2045
{
	struct memcg_stock_pcp *stock;
	bool ret = true;

	stock = &get_cpu_var(memcg_stock);
2046
	if (memcg == stock->cached && stock->nr_pages)
2047
		stock->nr_pages--;
2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060
	else /* need to call res_counter_charge */
		ret = false;
	put_cpu_var(memcg_stock);
	return ret;
}

/*
 * Returns stocks cached in percpu to res_counter and reset cached information.
 */
static void drain_stock(struct memcg_stock_pcp *stock)
{
	struct mem_cgroup *old = stock->cached;

2061 2062 2063 2064
	if (stock->nr_pages) {
		unsigned long bytes = stock->nr_pages * PAGE_SIZE;

		res_counter_uncharge(&old->res, bytes);
2065
		if (do_swap_account)
2066 2067
			res_counter_uncharge(&old->memsw, bytes);
		stock->nr_pages = 0;
2068 2069 2070 2071 2072 2073 2074 2075 2076 2077 2078 2079
	}
	stock->cached = NULL;
}

/*
 * This must be called under preempt disabled or must be called by
 * a thread which is pinned to local cpu.
 */
static void drain_local_stock(struct work_struct *dummy)
{
	struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
	drain_stock(stock);
2080
	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2081 2082 2083 2084
}

/*
 * Cache charges(val) which is from res_counter, to local per_cpu area.
2085
 * This will be consumed by consume_stock() function, later.
2086
 */
2087
static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2088 2089 2090
{
	struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);

2091
	if (stock->cached != memcg) { /* reset if necessary */
2092
		drain_stock(stock);
2093
		stock->cached = memcg;
2094
	}
2095
	stock->nr_pages += nr_pages;
2096 2097 2098 2099
	put_cpu_var(memcg_stock);
}

/*
2100
 * Drains all per-CPU charge caches for given root_memcg resp. subtree
2101 2102
 * of the hierarchy under it. sync flag says whether we should block
 * until the work is done.
2103
 */
2104
static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
2105
{
2106
	int cpu, curcpu;
2107

2108 2109
	/* Notify other cpus that system-wide "drain" is running */
	get_online_cpus();
2110
	curcpu = get_cpu();
2111 2112
	for_each_online_cpu(cpu) {
		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2113
		struct mem_cgroup *memcg;
2114

2115 2116
		memcg = stock->cached;
		if (!memcg || !stock->nr_pages)
2117
			continue;
2118
		if (!mem_cgroup_same_or_subtree(root_memcg, memcg))
2119
			continue;
2120 2121 2122 2123 2124 2125
		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
			if (cpu == curcpu)
				drain_local_stock(&stock->work);
			else
				schedule_work_on(cpu, &stock->work);
		}
2126
	}
2127
	put_cpu();
2128 2129 2130 2131 2132 2133

	if (!sync)
		goto out;

	for_each_online_cpu(cpu) {
		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2134
		if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
2135 2136 2137
			flush_work(&stock->work);
	}
out:
2138
 	put_online_cpus();
2139 2140 2141 2142 2143 2144 2145 2146
}

/*
 * Tries to drain stocked charges in other cpus. This function is asynchronous
 * and just put a work per cpu for draining localy on each cpu. Caller can
 * expects some charges will be back to res_counter later but cannot wait for
 * it.
 */
2147
static void drain_all_stock_async(struct mem_cgroup *root_memcg)
2148
{
2149 2150 2151 2152 2153
	/*
	 * If someone calls draining, avoid adding more kworker runs.
	 */
	if (!mutex_trylock(&percpu_charge_mutex))
		return;
2154
	drain_all_stock(root_memcg, false);
2155
	mutex_unlock(&percpu_charge_mutex);
2156 2157 2158
}

/* This is a synchronous drain interface. */
2159
static void drain_all_stock_sync(struct mem_cgroup *root_memcg)
2160 2161
{
	/* called when force_empty is called */
2162
	mutex_lock(&percpu_charge_mutex);
2163
	drain_all_stock(root_memcg, true);
2164
	mutex_unlock(&percpu_charge_mutex);
2165 2166
}

2167 2168 2169 2170
/*
 * This function drains percpu counter value from DEAD cpu and
 * move it to local cpu. Note that this function can be preempted.
 */
2171
static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
2172 2173 2174
{
	int i;

2175
	spin_lock(&memcg->pcp_counter_lock);
2176
	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
2177
		long x = per_cpu(memcg->stat->count[i], cpu);
2178

2179 2180
		per_cpu(memcg->stat->count[i], cpu) = 0;
		memcg->nocpu_base.count[i] += x;
2181
	}
2182
	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
2183
		unsigned long x = per_cpu(memcg->stat->events[i], cpu);
2184

2185 2186
		per_cpu(memcg->stat->events[i], cpu) = 0;
		memcg->nocpu_base.events[i] += x;
2187
	}
2188
	spin_unlock(&memcg->pcp_counter_lock);
2189 2190 2191
}

static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
2192 2193 2194 2195 2196
					unsigned long action,
					void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	struct memcg_stock_pcp *stock;
2197
	struct mem_cgroup *iter;
2198

2199
	if (action == CPU_ONLINE)
2200 2201
		return NOTIFY_OK;

2202
	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
2203
		return NOTIFY_OK;
2204

2205
	for_each_mem_cgroup(iter)
2206 2207
		mem_cgroup_drain_pcp_counter(iter, cpu);

2208 2209 2210 2211 2212
	stock = &per_cpu(memcg_stock, cpu);
	drain_stock(stock);
	return NOTIFY_OK;
}

2213 2214 2215 2216 2217 2218 2219 2220 2221 2222

/* See __mem_cgroup_try_charge() for details */
enum {
	CHARGE_OK,		/* success */
	CHARGE_RETRY,		/* need to retry but retry is not bad */
	CHARGE_NOMEM,		/* we can't do more. return -ENOMEM */
	CHARGE_WOULDBLOCK,	/* GFP_WAIT wasn't set and no enough res. */
	CHARGE_OOM_DIE,		/* the current is killed because of OOM */
};

2223
static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2224
				unsigned int nr_pages, bool oom_check)
2225
{
2226
	unsigned long csize = nr_pages * PAGE_SIZE;
2227 2228 2229 2230 2231
	struct mem_cgroup *mem_over_limit;
	struct res_counter *fail_res;
	unsigned long flags = 0;
	int ret;

2232
	ret = res_counter_charge(&memcg->res, csize, &fail_res);
2233 2234 2235 2236

	if (likely(!ret)) {
		if (!do_swap_account)
			return CHARGE_OK;
2237
		ret = res_counter_charge(&memcg->memsw, csize, &fail_res);
2238 2239 2240
		if (likely(!ret))
			return CHARGE_OK;

2241
		res_counter_uncharge(&memcg->res, csize);
2242 2243 2244 2245
		mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
		flags |= MEM_CGROUP_RECLAIM_NOSWAP;
	} else
		mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
2246
	/*
2247 2248
	 * nr_pages can be either a huge page (HPAGE_PMD_NR), a batch
	 * of regular pages (CHARGE_BATCH), or a single regular page (1).
2249 2250 2251 2252
	 *
	 * Never reclaim on behalf of optional batching, retry with a
	 * single page instead.
	 */
2253
	if (nr_pages == CHARGE_BATCH)
2254 2255 2256 2257 2258
		return CHARGE_RETRY;

	if (!(gfp_mask & __GFP_WAIT))
		return CHARGE_WOULDBLOCK;

2259
	ret = mem_cgroup_reclaim(mem_over_limit, gfp_mask, flags);
2260
	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2261
		return CHARGE_RETRY;
2262
	/*
2263 2264 2265 2266 2267 2268 2269
	 * Even though the limit is exceeded at this point, reclaim
	 * may have been able to free some pages.  Retry the charge
	 * before killing the task.
	 *
	 * Only for regular pages, though: huge pages are rather
	 * unlikely to succeed so close to the limit, and we fall back
	 * to regular pages anyway in case of failure.
2270
	 */
2271
	if (nr_pages == 1 && ret)
2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284
		return CHARGE_RETRY;

	/*
	 * At task move, charge accounts can be doubly counted. So, it's
	 * better to wait until the end of task_move if something is going on.
	 */
	if (mem_cgroup_wait_acct_move(mem_over_limit))
		return CHARGE_RETRY;

	/* If we don't need to call oom-killer at el, return immediately */
	if (!oom_check)
		return CHARGE_NOMEM;
	/* check OOM */
2285
	if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask, get_order(csize)))
2286 2287 2288 2289 2290
		return CHARGE_OOM_DIE;

	return CHARGE_RETRY;
}

2291
/*
2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310
 * __mem_cgroup_try_charge() does
 * 1. detect memcg to be charged against from passed *mm and *ptr,
 * 2. update res_counter
 * 3. call memory reclaim if necessary.
 *
 * In some special case, if the task is fatal, fatal_signal_pending() or
 * has TIF_MEMDIE, this function returns -EINTR while writing root_mem_cgroup
 * to *ptr. There are two reasons for this. 1: fatal threads should quit as soon
 * as possible without any hazards. 2: all pages should have a valid
 * pc->mem_cgroup. If mm is NULL and the caller doesn't pass a valid memcg
 * pointer, that is treated as a charge to root_mem_cgroup.
 *
 * So __mem_cgroup_try_charge() will return
 *  0       ...  on success, filling *ptr with a valid memcg pointer.
 *  -ENOMEM ...  charge failure because of resource limits.
 *  -EINTR  ...  if thread is fatal. *ptr is filled with root_mem_cgroup.
 *
 * Unlike the exported interface, an "oom" parameter is added. if oom==true,
 * the oom-killer can be invoked.
2311
 */
2312
static int __mem_cgroup_try_charge(struct mm_struct *mm,
A
Andrea Arcangeli 已提交
2313
				   gfp_t gfp_mask,
2314
				   unsigned int nr_pages,
2315
				   struct mem_cgroup **ptr,
2316
				   bool oom)
2317
{
2318
	unsigned int batch = max(CHARGE_BATCH, nr_pages);
2319
	int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
2320
	struct mem_cgroup *memcg = NULL;
2321
	int ret;
2322

K
KAMEZAWA Hiroyuki 已提交
2323 2324 2325 2326 2327 2328 2329 2330
	/*
	 * Unlike gloval-vm's OOM-kill, we're not in memory shortage
	 * in system level. So, allow to go ahead dying process in addition to
	 * MEMDIE process.
	 */
	if (unlikely(test_thread_flag(TIF_MEMDIE)
		     || fatal_signal_pending(current)))
		goto bypass;
2331

2332
	/*
2333 2334
	 * We always charge the cgroup the mm_struct belongs to.
	 * The mm_struct's mem_cgroup changes on task migration if the
2335
	 * thread group leader migrates. It's possible that mm is not
2336
	 * set, if so charge the root memcg (happens for pagecache usage).
2337
	 */
2338
	if (!*ptr && !mm)
2339
		*ptr = root_mem_cgroup;
K
KAMEZAWA Hiroyuki 已提交
2340
again:
2341 2342 2343 2344
	if (*ptr) { /* css should be a valid one */
		memcg = *ptr;
		VM_BUG_ON(css_is_removed(&memcg->css));
		if (mem_cgroup_is_root(memcg))
K
KAMEZAWA Hiroyuki 已提交
2345
			goto done;
2346
		if (nr_pages == 1 && consume_stock(memcg))
K
KAMEZAWA Hiroyuki 已提交
2347
			goto done;
2348
		css_get(&memcg->css);
2349
	} else {
K
KAMEZAWA Hiroyuki 已提交
2350
		struct task_struct *p;
2351

K
KAMEZAWA Hiroyuki 已提交
2352 2353 2354
		rcu_read_lock();
		p = rcu_dereference(mm->owner);
		/*
2355
		 * Because we don't have task_lock(), "p" can exit.
2356
		 * In that case, "memcg" can point to root or p can be NULL with
2357 2358 2359 2360 2361 2362
		 * race with swapoff. Then, we have small risk of mis-accouning.
		 * But such kind of mis-account by race always happens because
		 * we don't have cgroup_mutex(). It's overkill and we allo that
		 * small race, here.
		 * (*) swapoff at el will charge against mm-struct not against
		 * task-struct. So, mm->owner can be NULL.
K
KAMEZAWA Hiroyuki 已提交
2363
		 */
2364
		memcg = mem_cgroup_from_task(p);
2365 2366 2367
		if (!memcg)
			memcg = root_mem_cgroup;
		if (mem_cgroup_is_root(memcg)) {
K
KAMEZAWA Hiroyuki 已提交
2368 2369 2370
			rcu_read_unlock();
			goto done;
		}
2371
		if (nr_pages == 1 && consume_stock(memcg)) {
K
KAMEZAWA Hiroyuki 已提交
2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383
			/*
			 * It seems dagerous to access memcg without css_get().
			 * But considering how consume_stok works, it's not
			 * necessary. If consume_stock success, some charges
			 * from this memcg are cached on this cpu. So, we
			 * don't need to call css_get()/css_tryget() before
			 * calling consume_stock().
			 */
			rcu_read_unlock();
			goto done;
		}
		/* after here, we may be blocked. we need to get refcnt */
2384
		if (!css_tryget(&memcg->css)) {
K
KAMEZAWA Hiroyuki 已提交
2385 2386 2387 2388 2389
			rcu_read_unlock();
			goto again;
		}
		rcu_read_unlock();
	}
2390

2391 2392
	do {
		bool oom_check;
2393

2394
		/* If killed, bypass charge */
K
KAMEZAWA Hiroyuki 已提交
2395
		if (fatal_signal_pending(current)) {
2396
			css_put(&memcg->css);
2397
			goto bypass;
K
KAMEZAWA Hiroyuki 已提交
2398
		}
2399

2400 2401 2402 2403
		oom_check = false;
		if (oom && !nr_oom_retries) {
			oom_check = true;
			nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
2404
		}
2405

2406
		ret = mem_cgroup_do_charge(memcg, gfp_mask, batch, oom_check);
2407 2408 2409 2410
		switch (ret) {
		case CHARGE_OK:
			break;
		case CHARGE_RETRY: /* not in OOM situation but retry */
2411
			batch = nr_pages;
2412 2413
			css_put(&memcg->css);
			memcg = NULL;
K
KAMEZAWA Hiroyuki 已提交
2414
			goto again;
2415
		case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
2416
			css_put(&memcg->css);
2417 2418
			goto nomem;
		case CHARGE_NOMEM: /* OOM routine works */
K
KAMEZAWA Hiroyuki 已提交
2419
			if (!oom) {
2420
				css_put(&memcg->css);
K
KAMEZAWA Hiroyuki 已提交
2421
				goto nomem;
K
KAMEZAWA Hiroyuki 已提交
2422
			}
2423 2424 2425 2426
			/* If oom, we never return -ENOMEM */
			nr_oom_retries--;
			break;
		case CHARGE_OOM_DIE: /* Killed by OOM Killer */
2427
			css_put(&memcg->css);
K
KAMEZAWA Hiroyuki 已提交
2428
			goto bypass;
2429
		}
2430 2431
	} while (ret != CHARGE_OK);

2432
	if (batch > nr_pages)
2433 2434
		refill_stock(memcg, batch - nr_pages);
	css_put(&memcg->css);
2435
done:
2436
	*ptr = memcg;
2437 2438
	return 0;
nomem:
2439
	*ptr = NULL;
2440
	return -ENOMEM;
K
KAMEZAWA Hiroyuki 已提交
2441
bypass:
2442 2443
	*ptr = root_mem_cgroup;
	return -EINTR;
2444
}
2445

2446 2447 2448 2449 2450
/*
 * Somemtimes we have to undo a charge we got by try_charge().
 * This function is for that and do uncharge, put css's refcnt.
 * gotten by try_charge().
 */
2451
static void __mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
2452
				       unsigned int nr_pages)
2453
{
2454
	if (!mem_cgroup_is_root(memcg)) {
2455 2456
		unsigned long bytes = nr_pages * PAGE_SIZE;

2457
		res_counter_uncharge(&memcg->res, bytes);
2458
		if (do_swap_account)
2459
			res_counter_uncharge(&memcg->memsw, bytes);
2460
	}
2461 2462
}

2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480
/*
 * Cancel chrages in this cgroup....doesn't propagate to parent cgroup.
 * This is useful when moving usage to parent cgroup.
 */
static void __mem_cgroup_cancel_local_charge(struct mem_cgroup *memcg,
					unsigned int nr_pages)
{
	unsigned long bytes = nr_pages * PAGE_SIZE;

	if (mem_cgroup_is_root(memcg))
		return;

	res_counter_uncharge_until(&memcg->res, memcg->res.parent, bytes);
	if (do_swap_account)
		res_counter_uncharge_until(&memcg->memsw,
						memcg->memsw.parent, bytes);
}

2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499
/*
 * A helper function to get mem_cgroup from ID. must be called under
 * rcu_read_lock(). The caller must check css_is_removed() or some if
 * it's concern. (dropping refcnt from swap can be called against removed
 * memcg.)
 */
static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
{
	struct cgroup_subsys_state *css;

	/* ID 0 is unused ID */
	if (!id)
		return NULL;
	css = css_lookup(&mem_cgroup_subsys, id);
	if (!css)
		return NULL;
	return container_of(css, struct mem_cgroup, css);
}

2500
struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
2501
{
2502
	struct mem_cgroup *memcg = NULL;
2503
	struct page_cgroup *pc;
2504
	unsigned short id;
2505 2506
	swp_entry_t ent;

2507 2508 2509
	VM_BUG_ON(!PageLocked(page));

	pc = lookup_page_cgroup(page);
2510
	lock_page_cgroup(pc);
2511
	if (PageCgroupUsed(pc)) {
2512 2513 2514
		memcg = pc->mem_cgroup;
		if (memcg && !css_tryget(&memcg->css))
			memcg = NULL;
2515
	} else if (PageSwapCache(page)) {
2516
		ent.val = page_private(page);
2517
		id = lookup_swap_cgroup_id(ent);
2518
		rcu_read_lock();
2519 2520 2521
		memcg = mem_cgroup_lookup(id);
		if (memcg && !css_tryget(&memcg->css))
			memcg = NULL;
2522
		rcu_read_unlock();
2523
	}
2524
	unlock_page_cgroup(pc);
2525
	return memcg;
2526 2527
}

2528
static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
2529
				       struct page *page,
2530
				       unsigned int nr_pages,
2531 2532
				       enum charge_type ctype,
				       bool lrucare)
2533
{
2534
	struct page_cgroup *pc = lookup_page_cgroup(page);
2535
	struct zone *uninitialized_var(zone);
2536
	struct lruvec *lruvec;
2537
	bool was_on_lru = false;
2538
	bool anon;
2539

2540
	lock_page_cgroup(pc);
2541
	VM_BUG_ON(PageCgroupUsed(pc));
2542 2543 2544 2545
	/*
	 * we don't need page_cgroup_lock about tail pages, becase they are not
	 * accessed by any other context at this point.
	 */
2546 2547 2548 2549 2550 2551 2552 2553 2554

	/*
	 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
	 * may already be on some other mem_cgroup's LRU.  Take care of it.
	 */
	if (lrucare) {
		zone = page_zone(page);
		spin_lock_irq(&zone->lru_lock);
		if (PageLRU(page)) {
2555
			lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
2556
			ClearPageLRU(page);
2557
			del_page_from_lru_list(page, lruvec, page_lru(page));
2558 2559 2560 2561
			was_on_lru = true;
		}
	}

2562
	pc->mem_cgroup = memcg;
2563 2564 2565 2566 2567 2568 2569
	/*
	 * We access a page_cgroup asynchronously without lock_page_cgroup().
	 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
	 * is accessed after testing USED bit. To make pc->mem_cgroup visible
	 * before USED bit, we need memory barrier here.
	 * See mem_cgroup_add_lru_list(), etc.
 	 */
K
KAMEZAWA Hiroyuki 已提交
2570
	smp_wmb();
2571
	SetPageCgroupUsed(pc);
2572

2573 2574
	if (lrucare) {
		if (was_on_lru) {
2575
			lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
2576 2577
			VM_BUG_ON(PageLRU(page));
			SetPageLRU(page);
2578
			add_page_to_lru_list(page, lruvec, page_lru(page));
2579 2580 2581 2582
		}
		spin_unlock_irq(&zone->lru_lock);
	}

2583
	if (ctype == MEM_CGROUP_CHARGE_TYPE_ANON)
2584 2585 2586 2587 2588
		anon = true;
	else
		anon = false;

	mem_cgroup_charge_statistics(memcg, anon, nr_pages);
2589
	unlock_page_cgroup(pc);
2590

2591 2592 2593 2594 2595
	/*
	 * "charge_statistics" updated event counter. Then, check it.
	 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
	 * if they exceeds softlimit.
	 */
2596
	memcg_check_events(memcg, page);
2597
}
2598

2599 2600
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

2601
#define PCGF_NOCOPY_AT_SPLIT (1 << PCG_LOCK | 1 << PCG_MIGRATION)
2602 2603
/*
 * Because tail pages are not marked as "used", set it. We're under
2604 2605 2606
 * zone->lru_lock, 'splitting on pmd' and compound_lock.
 * charge/uncharge will be never happen and move_account() is done under
 * compound_lock(), so we don't have to take care of races.
2607
 */
2608
void mem_cgroup_split_huge_fixup(struct page *head)
2609 2610
{
	struct page_cgroup *head_pc = lookup_page_cgroup(head);
2611 2612
	struct page_cgroup *pc;
	int i;
2613

2614 2615
	if (mem_cgroup_disabled())
		return;
2616 2617 2618 2619 2620 2621
	for (i = 1; i < HPAGE_PMD_NR; i++) {
		pc = head_pc + i;
		pc->mem_cgroup = head_pc->mem_cgroup;
		smp_wmb();/* see __commit_charge() */
		pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
	}
2622
}
2623
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2624

2625
/**
2626
 * mem_cgroup_move_account - move account of the page
2627
 * @page: the page
2628
 * @nr_pages: number of regular pages (>1 for huge pages)
2629 2630 2631 2632 2633
 * @pc:	page_cgroup of the page.
 * @from: mem_cgroup which the page is moved from.
 * @to:	mem_cgroup which the page is moved to. @from != @to.
 *
 * The caller must confirm following.
K
KAMEZAWA Hiroyuki 已提交
2634
 * - page is not on LRU (isolate_page() is useful.)
2635
 * - compound_lock is held when nr_pages > 1
2636
 *
2637 2638
 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
 * from old cgroup.
2639
 */
2640 2641 2642 2643
static int mem_cgroup_move_account(struct page *page,
				   unsigned int nr_pages,
				   struct page_cgroup *pc,
				   struct mem_cgroup *from,
2644
				   struct mem_cgroup *to)
2645
{
2646 2647
	unsigned long flags;
	int ret;
2648
	bool anon = PageAnon(page);
2649

2650
	VM_BUG_ON(from == to);
2651
	VM_BUG_ON(PageLRU(page));
2652 2653 2654 2655 2656 2657 2658
	/*
	 * The page is isolated from LRU. So, collapse function
	 * will not handle this page. But page splitting can happen.
	 * Do this check under compound_page_lock(). The caller should
	 * hold it.
	 */
	ret = -EBUSY;
2659
	if (nr_pages > 1 && !PageTransHuge(page))
2660 2661 2662 2663 2664 2665 2666 2667
		goto out;

	lock_page_cgroup(pc);

	ret = -EINVAL;
	if (!PageCgroupUsed(pc) || pc->mem_cgroup != from)
		goto unlock;

2668
	move_lock_mem_cgroup(from, &flags);
2669

2670
	if (!anon && page_mapped(page)) {
2671 2672 2673 2674 2675
		/* Update mapped_file data for mem_cgroup */
		preempt_disable();
		__this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
		__this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
		preempt_enable();
2676
	}
2677
	mem_cgroup_charge_statistics(from, anon, -nr_pages);
2678

2679
	/* caller should have done css_get */
K
KAMEZAWA Hiroyuki 已提交
2680
	pc->mem_cgroup = to;
2681
	mem_cgroup_charge_statistics(to, anon, nr_pages);
2682 2683 2684
	/*
	 * We charges against "to" which may not have any tasks. Then, "to"
	 * can be under rmdir(). But in current implementation, caller of
2685
	 * this function is just force_empty() and move charge, so it's
L
Lucas De Marchi 已提交
2686
	 * guaranteed that "to" is never removed. So, we don't check rmdir
2687
	 * status here.
2688
	 */
2689
	move_unlock_mem_cgroup(from, &flags);
2690 2691
	ret = 0;
unlock:
2692
	unlock_page_cgroup(pc);
2693 2694 2695
	/*
	 * check events
	 */
2696 2697
	memcg_check_events(to, page);
	memcg_check_events(from, page);
2698
out:
2699 2700 2701 2702 2703 2704 2705
	return ret;
}

/*
 * move charges to its parent.
 */

2706 2707
static int mem_cgroup_move_parent(struct page *page,
				  struct page_cgroup *pc,
2708
				  struct mem_cgroup *child)
2709 2710
{
	struct mem_cgroup *parent;
2711
	unsigned int nr_pages;
2712
	unsigned long uninitialized_var(flags);
2713 2714 2715
	int ret;

	/* Is ROOT ? */
2716
	if (mem_cgroup_is_root(child))
2717 2718
		return -EINVAL;

2719 2720 2721 2722 2723
	ret = -EBUSY;
	if (!get_page_unless_zero(page))
		goto out;
	if (isolate_lru_page(page))
		goto put;
2724

2725
	nr_pages = hpage_nr_pages(page);
K
KAMEZAWA Hiroyuki 已提交
2726

2727 2728 2729 2730 2731 2732
	parent = parent_mem_cgroup(child);
	/*
	 * If no parent, move charges to root cgroup.
	 */
	if (!parent)
		parent = root_mem_cgroup;
2733

2734
	if (nr_pages > 1)
2735 2736
		flags = compound_lock_irqsave(page);

2737
	ret = mem_cgroup_move_account(page, nr_pages,
2738
				pc, child, parent);
2739 2740
	if (!ret)
		__mem_cgroup_cancel_local_charge(child, nr_pages);
2741

2742
	if (nr_pages > 1)
2743
		compound_unlock_irqrestore(page, flags);
K
KAMEZAWA Hiroyuki 已提交
2744
	putback_lru_page(page);
2745
put:
2746
	put_page(page);
2747
out:
2748 2749 2750
	return ret;
}

2751 2752 2753 2754 2755 2756 2757
/*
 * Charge the memory controller for page usage.
 * Return
 * 0 if the charge was successful
 * < 0 if the cgroup is over its limit
 */
static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
2758
				gfp_t gfp_mask, enum charge_type ctype)
2759
{
2760
	struct mem_cgroup *memcg = NULL;
2761
	unsigned int nr_pages = 1;
2762
	bool oom = true;
2763
	int ret;
A
Andrea Arcangeli 已提交
2764

A
Andrea Arcangeli 已提交
2765
	if (PageTransHuge(page)) {
2766
		nr_pages <<= compound_order(page);
A
Andrea Arcangeli 已提交
2767
		VM_BUG_ON(!PageTransHuge(page));
2768 2769 2770 2771 2772
		/*
		 * Never OOM-kill a process for a huge page.  The
		 * fault handler will fall back to regular pages.
		 */
		oom = false;
A
Andrea Arcangeli 已提交
2773
	}
2774

2775
	ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
2776
	if (ret == -ENOMEM)
2777
		return ret;
2778
	__mem_cgroup_commit_charge(memcg, page, nr_pages, ctype, false);
2779 2780 2781
	return 0;
}

2782 2783
int mem_cgroup_newpage_charge(struct page *page,
			      struct mm_struct *mm, gfp_t gfp_mask)
2784
{
2785
	if (mem_cgroup_disabled())
2786
		return 0;
2787 2788 2789
	VM_BUG_ON(page_mapped(page));
	VM_BUG_ON(page->mapping && !PageAnon(page));
	VM_BUG_ON(!mm);
2790
	return mem_cgroup_charge_common(page, mm, gfp_mask,
2791
					MEM_CGROUP_CHARGE_TYPE_ANON);
2792 2793
}

2794 2795 2796
/*
 * While swap-in, try_charge -> commit or cancel, the page is locked.
 * And when try_charge() successfully returns, one refcnt to memcg without
2797
 * struct page_cgroup is acquired. This refcnt will be consumed by
2798 2799
 * "commit()" or removed by "cancel()"
 */
2800 2801 2802 2803
static int __mem_cgroup_try_charge_swapin(struct mm_struct *mm,
					  struct page *page,
					  gfp_t mask,
					  struct mem_cgroup **memcgp)
2804
{
2805
	struct mem_cgroup *memcg;
2806
	struct page_cgroup *pc;
2807
	int ret;
2808

2809 2810 2811 2812 2813 2814 2815 2816 2817 2818
	pc = lookup_page_cgroup(page);
	/*
	 * Every swap fault against a single page tries to charge the
	 * page, bail as early as possible.  shmem_unuse() encounters
	 * already charged pages, too.  The USED bit is protected by
	 * the page lock, which serializes swap cache removal, which
	 * in turn serializes uncharging.
	 */
	if (PageCgroupUsed(pc))
		return 0;
2819 2820 2821 2822
	if (!do_swap_account)
		goto charge_cur_mm;
	/*
	 * A racing thread's fault, or swapoff, may have already updated
H
Hugh Dickins 已提交
2823 2824 2825
	 * the pte, and even removed page from swap cache: in those cases
	 * do_swap_page()'s pte_same() test will fail; but there's also a
	 * KSM case which does need to charge the page.
2826 2827
	 */
	if (!PageSwapCache(page))
H
Hugh Dickins 已提交
2828
		goto charge_cur_mm;
2829 2830
	memcg = try_get_mem_cgroup_from_page(page);
	if (!memcg)
2831
		goto charge_cur_mm;
2832 2833
	*memcgp = memcg;
	ret = __mem_cgroup_try_charge(NULL, mask, 1, memcgp, true);
2834
	css_put(&memcg->css);
2835 2836
	if (ret == -EINTR)
		ret = 0;
2837
	return ret;
2838
charge_cur_mm:
2839 2840 2841 2842
	ret = __mem_cgroup_try_charge(mm, mask, 1, memcgp, true);
	if (ret == -EINTR)
		ret = 0;
	return ret;
2843 2844
}

2845 2846 2847 2848 2849 2850 2851 2852 2853
int mem_cgroup_try_charge_swapin(struct mm_struct *mm, struct page *page,
				 gfp_t gfp_mask, struct mem_cgroup **memcgp)
{
	*memcgp = NULL;
	if (mem_cgroup_disabled())
		return 0;
	return __mem_cgroup_try_charge_swapin(mm, page, gfp_mask, memcgp);
}

2854 2855 2856 2857 2858 2859 2860 2861 2862
void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return;
	if (!memcg)
		return;
	__mem_cgroup_cancel_charge(memcg, 1);
}

D
Daisuke Nishimura 已提交
2863
static void
2864
__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
D
Daisuke Nishimura 已提交
2865
					enum charge_type ctype)
2866
{
2867
	if (mem_cgroup_disabled())
2868
		return;
2869
	if (!memcg)
2870
		return;
2871
	cgroup_exclude_rmdir(&memcg->css);
2872

2873
	__mem_cgroup_commit_charge(memcg, page, 1, ctype, true);
2874 2875 2876
	/*
	 * Now swap is on-memory. This means this page may be
	 * counted both as mem and swap....double count.
2877 2878 2879
	 * Fix it by uncharging from memsw. Basically, this SwapCache is stable
	 * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page()
	 * may call delete_from_swap_cache() before reach here.
2880
	 */
2881
	if (do_swap_account && PageSwapCache(page)) {
2882
		swp_entry_t ent = {.val = page_private(page)};
2883
		mem_cgroup_uncharge_swap(ent);
2884
	}
2885 2886 2887 2888 2889
	/*
	 * At swapin, we may charge account against cgroup which has no tasks.
	 * So, rmdir()->pre_destroy() can be called while we do this charge.
	 * In that case, we need to call pre_destroy() again. check it here.
	 */
2890
	cgroup_release_and_wakeup_rmdir(&memcg->css);
2891 2892
}

2893 2894
void mem_cgroup_commit_charge_swapin(struct page *page,
				     struct mem_cgroup *memcg)
D
Daisuke Nishimura 已提交
2895
{
2896
	__mem_cgroup_commit_charge_swapin(page, memcg,
2897
					  MEM_CGROUP_CHARGE_TYPE_ANON);
D
Daisuke Nishimura 已提交
2898 2899
}

2900 2901
int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask)
2902
{
2903 2904 2905 2906
	struct mem_cgroup *memcg = NULL;
	enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
	int ret;

2907
	if (mem_cgroup_disabled())
2908 2909 2910 2911 2912 2913 2914
		return 0;
	if (PageCompound(page))
		return 0;

	if (!PageSwapCache(page))
		ret = mem_cgroup_charge_common(page, mm, gfp_mask, type);
	else { /* page is swapcache/shmem */
2915 2916
		ret = __mem_cgroup_try_charge_swapin(mm, page,
						     gfp_mask, &memcg);
2917 2918 2919 2920
		if (!ret)
			__mem_cgroup_commit_charge_swapin(page, memcg, type);
	}
	return ret;
2921 2922
}

2923
static void mem_cgroup_do_uncharge(struct mem_cgroup *memcg,
2924 2925
				   unsigned int nr_pages,
				   const enum charge_type ctype)
2926 2927 2928
{
	struct memcg_batch_info *batch = NULL;
	bool uncharge_memsw = true;
2929

2930 2931 2932 2933 2934 2935 2936 2937 2938 2939 2940
	/* If swapout, usage of swap doesn't decrease */
	if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
		uncharge_memsw = false;

	batch = &current->memcg_batch;
	/*
	 * In usual, we do css_get() when we remember memcg pointer.
	 * But in this case, we keep res->usage until end of a series of
	 * uncharges. Then, it's ok to ignore memcg's refcnt.
	 */
	if (!batch->memcg)
2941
		batch->memcg = memcg;
2942 2943
	/*
	 * do_batch > 0 when unmapping pages or inode invalidate/truncate.
L
Lucas De Marchi 已提交
2944
	 * In those cases, all pages freed continuously can be expected to be in
2945 2946 2947 2948 2949 2950 2951 2952
	 * the same cgroup and we have chance to coalesce uncharges.
	 * But we do uncharge one by one if this is killed by OOM(TIF_MEMDIE)
	 * because we want to do uncharge as soon as possible.
	 */

	if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
		goto direct_uncharge;

2953
	if (nr_pages > 1)
A
Andrea Arcangeli 已提交
2954 2955
		goto direct_uncharge;

2956 2957 2958 2959 2960
	/*
	 * In typical case, batch->memcg == mem. This means we can
	 * merge a series of uncharges to an uncharge of res_counter.
	 * If not, we uncharge res_counter ony by one.
	 */
2961
	if (batch->memcg != memcg)
2962 2963
		goto direct_uncharge;
	/* remember freed charge and uncharge it later */
2964
	batch->nr_pages++;
2965
	if (uncharge_memsw)
2966
		batch->memsw_nr_pages++;
2967 2968
	return;
direct_uncharge:
2969
	res_counter_uncharge(&memcg->res, nr_pages * PAGE_SIZE);
2970
	if (uncharge_memsw)
2971 2972 2973
		res_counter_uncharge(&memcg->memsw, nr_pages * PAGE_SIZE);
	if (unlikely(batch->memcg != memcg))
		memcg_oom_recover(memcg);
2974
}
2975

2976
/*
2977
 * uncharge if !page_mapped(page)
2978
 */
2979
static struct mem_cgroup *
2980 2981
__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype,
			     bool end_migration)
2982
{
2983
	struct mem_cgroup *memcg = NULL;
2984 2985
	unsigned int nr_pages = 1;
	struct page_cgroup *pc;
2986
	bool anon;
2987

2988
	if (mem_cgroup_disabled())
2989
		return NULL;
2990

2991
	VM_BUG_ON(PageSwapCache(page));
K
KAMEZAWA Hiroyuki 已提交
2992

A
Andrea Arcangeli 已提交
2993
	if (PageTransHuge(page)) {
2994
		nr_pages <<= compound_order(page);
A
Andrea Arcangeli 已提交
2995 2996
		VM_BUG_ON(!PageTransHuge(page));
	}
2997
	/*
2998
	 * Check if our page_cgroup is valid
2999
	 */
3000
	pc = lookup_page_cgroup(page);
3001
	if (unlikely(!PageCgroupUsed(pc)))
3002
		return NULL;
3003

3004
	lock_page_cgroup(pc);
K
KAMEZAWA Hiroyuki 已提交
3005

3006
	memcg = pc->mem_cgroup;
3007

K
KAMEZAWA Hiroyuki 已提交
3008 3009 3010
	if (!PageCgroupUsed(pc))
		goto unlock_out;

3011 3012
	anon = PageAnon(page);

K
KAMEZAWA Hiroyuki 已提交
3013
	switch (ctype) {
3014
	case MEM_CGROUP_CHARGE_TYPE_ANON:
3015 3016 3017 3018 3019
		/*
		 * Generally PageAnon tells if it's the anon statistics to be
		 * updated; but sometimes e.g. mem_cgroup_uncharge_page() is
		 * used before page reached the stage of being marked PageAnon.
		 */
3020 3021
		anon = true;
		/* fallthrough */
K
KAMEZAWA Hiroyuki 已提交
3022
	case MEM_CGROUP_CHARGE_TYPE_DROP:
3023
		/* See mem_cgroup_prepare_migration() */
3024 3025 3026 3027 3028 3029 3030 3031 3032 3033
		if (page_mapped(page))
			goto unlock_out;
		/*
		 * Pages under migration may not be uncharged.  But
		 * end_migration() /must/ be the one uncharging the
		 * unused post-migration page and so it has to call
		 * here with the migration bit still set.  See the
		 * res_counter handling below.
		 */
		if (!end_migration && PageCgroupMigration(pc))
K
KAMEZAWA Hiroyuki 已提交
3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044
			goto unlock_out;
		break;
	case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
		if (!PageAnon(page)) {	/* Shared memory */
			if (page->mapping && !page_is_file_cache(page))
				goto unlock_out;
		} else if (page_mapped(page)) /* Anon */
				goto unlock_out;
		break;
	default:
		break;
3045
	}
K
KAMEZAWA Hiroyuki 已提交
3046

3047
	mem_cgroup_charge_statistics(memcg, anon, -nr_pages);
K
KAMEZAWA Hiroyuki 已提交
3048

3049
	ClearPageCgroupUsed(pc);
3050 3051 3052 3053 3054 3055
	/*
	 * pc->mem_cgroup is not cleared here. It will be accessed when it's
	 * freed from LRU. This is safe because uncharged page is expected not
	 * to be reused (freed soon). Exception is SwapCache, it's handled by
	 * special functions.
	 */
3056

3057
	unlock_page_cgroup(pc);
K
KAMEZAWA Hiroyuki 已提交
3058
	/*
3059
	 * even after unlock, we have memcg->res.usage here and this memcg
K
KAMEZAWA Hiroyuki 已提交
3060 3061
	 * will never be freed.
	 */
3062
	memcg_check_events(memcg, page);
K
KAMEZAWA Hiroyuki 已提交
3063
	if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
3064 3065
		mem_cgroup_swap_statistics(memcg, true);
		mem_cgroup_get(memcg);
K
KAMEZAWA Hiroyuki 已提交
3066
	}
3067 3068 3069 3070 3071 3072
	/*
	 * Migration does not charge the res_counter for the
	 * replacement page, so leave it alone when phasing out the
	 * page that is unused after the migration.
	 */
	if (!end_migration && !mem_cgroup_is_root(memcg))
3073
		mem_cgroup_do_uncharge(memcg, nr_pages, ctype);
3074

3075
	return memcg;
K
KAMEZAWA Hiroyuki 已提交
3076 3077 3078

unlock_out:
	unlock_page_cgroup(pc);
3079
	return NULL;
3080 3081
}

3082 3083
void mem_cgroup_uncharge_page(struct page *page)
{
3084 3085 3086
	/* early check. */
	if (page_mapped(page))
		return;
3087
	VM_BUG_ON(page->mapping && !PageAnon(page));
3088 3089
	if (PageSwapCache(page))
		return;
3090
	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_ANON, false);
3091 3092 3093 3094 3095
}

void mem_cgroup_uncharge_cache_page(struct page *page)
{
	VM_BUG_ON(page_mapped(page));
3096
	VM_BUG_ON(page->mapping);
3097
	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE, false);
3098 3099
}

3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113
/*
 * Batch_start/batch_end is called in unmap_page_range/invlidate/trucate.
 * In that cases, pages are freed continuously and we can expect pages
 * are in the same memcg. All these calls itself limits the number of
 * pages freed at once, then uncharge_start/end() is called properly.
 * This may be called prural(2) times in a context,
 */

void mem_cgroup_uncharge_start(void)
{
	current->memcg_batch.do_batch++;
	/* We can do nest. */
	if (current->memcg_batch.do_batch == 1) {
		current->memcg_batch.memcg = NULL;
3114 3115
		current->memcg_batch.nr_pages = 0;
		current->memcg_batch.memsw_nr_pages = 0;
3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135
	}
}

void mem_cgroup_uncharge_end(void)
{
	struct memcg_batch_info *batch = &current->memcg_batch;

	if (!batch->do_batch)
		return;

	batch->do_batch--;
	if (batch->do_batch) /* If stacked, do nothing. */
		return;

	if (!batch->memcg)
		return;
	/*
	 * This "batch->memcg" is valid without any css_get/put etc...
	 * bacause we hide charges behind us.
	 */
3136 3137 3138 3139 3140 3141
	if (batch->nr_pages)
		res_counter_uncharge(&batch->memcg->res,
				     batch->nr_pages * PAGE_SIZE);
	if (batch->memsw_nr_pages)
		res_counter_uncharge(&batch->memcg->memsw,
				     batch->memsw_nr_pages * PAGE_SIZE);
3142
	memcg_oom_recover(batch->memcg);
3143 3144 3145 3146
	/* forget this pointer (for sanity check) */
	batch->memcg = NULL;
}

3147
#ifdef CONFIG_SWAP
3148
/*
3149
 * called after __delete_from_swap_cache() and drop "page" account.
3150 3151
 * memcg information is recorded to swap_cgroup of "ent"
 */
K
KAMEZAWA Hiroyuki 已提交
3152 3153
void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
3154 3155
{
	struct mem_cgroup *memcg;
K
KAMEZAWA Hiroyuki 已提交
3156 3157 3158 3159 3160
	int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;

	if (!swapout) /* this was a swap cache but the swap is unused ! */
		ctype = MEM_CGROUP_CHARGE_TYPE_DROP;

3161
	memcg = __mem_cgroup_uncharge_common(page, ctype, false);
3162

K
KAMEZAWA Hiroyuki 已提交
3163 3164 3165 3166 3167
	/*
	 * record memcg information,  if swapout && memcg != NULL,
	 * mem_cgroup_get() was called in uncharge().
	 */
	if (do_swap_account && swapout && memcg)
3168
		swap_cgroup_record(ent, css_id(&memcg->css));
3169
}
3170
#endif
3171

A
Andrew Morton 已提交
3172
#ifdef CONFIG_MEMCG_SWAP
3173 3174 3175 3176 3177
/*
 * called from swap_entry_free(). remove record in swap_cgroup and
 * uncharge "memsw" account.
 */
void mem_cgroup_uncharge_swap(swp_entry_t ent)
K
KAMEZAWA Hiroyuki 已提交
3178
{
3179
	struct mem_cgroup *memcg;
3180
	unsigned short id;
3181 3182 3183 3184

	if (!do_swap_account)
		return;

3185 3186 3187
	id = swap_cgroup_record(ent, 0);
	rcu_read_lock();
	memcg = mem_cgroup_lookup(id);
3188
	if (memcg) {
3189 3190 3191 3192
		/*
		 * We uncharge this because swap is freed.
		 * This memcg can be obsolete one. We avoid calling css_tryget
		 */
3193
		if (!mem_cgroup_is_root(memcg))
3194
			res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
3195
		mem_cgroup_swap_statistics(memcg, false);
3196 3197
		mem_cgroup_put(memcg);
	}
3198
	rcu_read_unlock();
K
KAMEZAWA Hiroyuki 已提交
3199
}
3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215

/**
 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
 * @entry: swap entry to be moved
 * @from:  mem_cgroup which the entry is moved from
 * @to:  mem_cgroup which the entry is moved to
 *
 * It succeeds only when the swap_cgroup's record for this entry is the same
 * as the mem_cgroup's id of @from.
 *
 * Returns 0 on success, -EINVAL on failure.
 *
 * The caller must have charged to @to, IOW, called res_counter_charge() about
 * both res and memsw, and called css_get().
 */
static int mem_cgroup_move_swap_account(swp_entry_t entry,
3216
				struct mem_cgroup *from, struct mem_cgroup *to)
3217 3218 3219 3220 3221 3222 3223 3224
{
	unsigned short old_id, new_id;

	old_id = css_id(&from->css);
	new_id = css_id(&to->css);

	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
		mem_cgroup_swap_statistics(from, false);
3225
		mem_cgroup_swap_statistics(to, true);
3226
		/*
3227 3228 3229 3230 3231 3232
		 * This function is only called from task migration context now.
		 * It postpones res_counter and refcount handling till the end
		 * of task migration(mem_cgroup_clear_mc()) for performance
		 * improvement. But we cannot postpone mem_cgroup_get(to)
		 * because if the process that has been moved to @to does
		 * swap-in, the refcount of @to might be decreased to 0.
3233 3234 3235 3236 3237 3238 3239 3240
		 */
		mem_cgroup_get(to);
		return 0;
	}
	return -EINVAL;
}
#else
static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3241
				struct mem_cgroup *from, struct mem_cgroup *to)
3242 3243 3244
{
	return -EINVAL;
}
3245
#endif
K
KAMEZAWA Hiroyuki 已提交
3246

3247
/*
3248 3249
 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
 * page belongs to.
3250
 */
3251 3252
void mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
				  struct mem_cgroup **memcgp)
3253
{
3254
	struct mem_cgroup *memcg = NULL;
3255
	struct page_cgroup *pc;
3256
	enum charge_type ctype;
3257

3258
	*memcgp = NULL;
3259

A
Andrea Arcangeli 已提交
3260
	VM_BUG_ON(PageTransHuge(page));
3261
	if (mem_cgroup_disabled())
3262
		return;
3263

3264 3265 3266
	pc = lookup_page_cgroup(page);
	lock_page_cgroup(pc);
	if (PageCgroupUsed(pc)) {
3267 3268
		memcg = pc->mem_cgroup;
		css_get(&memcg->css);
3269 3270 3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299
		/*
		 * At migrating an anonymous page, its mapcount goes down
		 * to 0 and uncharge() will be called. But, even if it's fully
		 * unmapped, migration may fail and this page has to be
		 * charged again. We set MIGRATION flag here and delay uncharge
		 * until end_migration() is called
		 *
		 * Corner Case Thinking
		 * A)
		 * When the old page was mapped as Anon and it's unmap-and-freed
		 * while migration was ongoing.
		 * If unmap finds the old page, uncharge() of it will be delayed
		 * until end_migration(). If unmap finds a new page, it's
		 * uncharged when it make mapcount to be 1->0. If unmap code
		 * finds swap_migration_entry, the new page will not be mapped
		 * and end_migration() will find it(mapcount==0).
		 *
		 * B)
		 * When the old page was mapped but migraion fails, the kernel
		 * remaps it. A charge for it is kept by MIGRATION flag even
		 * if mapcount goes down to 0. We can do remap successfully
		 * without charging it again.
		 *
		 * C)
		 * The "old" page is under lock_page() until the end of
		 * migration, so, the old page itself will not be swapped-out.
		 * If the new page is swapped out before end_migraton, our
		 * hook to usual swap-out path will catch the event.
		 */
		if (PageAnon(page))
			SetPageCgroupMigration(pc);
3300
	}
3301
	unlock_page_cgroup(pc);
3302 3303 3304 3305
	/*
	 * If the page is not charged at this point,
	 * we return here.
	 */
3306
	if (!memcg)
3307
		return;
3308

3309
	*memcgp = memcg;
3310 3311 3312 3313 3314 3315 3316
	/*
	 * We charge new page before it's used/mapped. So, even if unlock_page()
	 * is called before end_migration, we can catch all events on this new
	 * page. In the case new page is migrated but not remapped, new page's
	 * mapcount will be finally 0 and we call uncharge in end_migration().
	 */
	if (PageAnon(page))
3317
		ctype = MEM_CGROUP_CHARGE_TYPE_ANON;
3318
	else
3319
		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
3320 3321 3322 3323 3324
	/*
	 * The page is committed to the memcg, but it's not actually
	 * charged to the res_counter since we plan on replacing the
	 * old one and only one page is going to be left afterwards.
	 */
3325
	__mem_cgroup_commit_charge(memcg, newpage, 1, ctype, false);
3326
}
3327

3328
/* remove redundant charge if migration failed*/
3329
void mem_cgroup_end_migration(struct mem_cgroup *memcg,
3330
	struct page *oldpage, struct page *newpage, bool migration_ok)
3331
{
3332
	struct page *used, *unused;
3333
	struct page_cgroup *pc;
3334
	bool anon;
3335

3336
	if (!memcg)
3337
		return;
3338
	/* blocks rmdir() */
3339
	cgroup_exclude_rmdir(&memcg->css);
3340
	if (!migration_ok) {
3341 3342
		used = oldpage;
		unused = newpage;
3343
	} else {
3344
		used = newpage;
3345 3346
		unused = oldpage;
	}
3347
	anon = PageAnon(used);
3348 3349 3350 3351
	__mem_cgroup_uncharge_common(unused,
				     anon ? MEM_CGROUP_CHARGE_TYPE_ANON
				     : MEM_CGROUP_CHARGE_TYPE_CACHE,
				     true);
3352
	css_put(&memcg->css);
3353
	/*
3354 3355 3356
	 * We disallowed uncharge of pages under migration because mapcount
	 * of the page goes down to zero, temporarly.
	 * Clear the flag and check the page should be charged.
3357
	 */
3358 3359 3360 3361 3362
	pc = lookup_page_cgroup(oldpage);
	lock_page_cgroup(pc);
	ClearPageCgroupMigration(pc);
	unlock_page_cgroup(pc);

3363
	/*
3364 3365 3366 3367 3368 3369
	 * If a page is a file cache, radix-tree replacement is very atomic
	 * and we can skip this check. When it was an Anon page, its mapcount
	 * goes down to 0. But because we added MIGRATION flage, it's not
	 * uncharged yet. There are several case but page->mapcount check
	 * and USED bit check in mem_cgroup_uncharge_page() will do enough
	 * check. (see prepare_charge() also)
3370
	 */
3371
	if (anon)
3372
		mem_cgroup_uncharge_page(used);
3373
	/*
3374 3375
	 * At migration, we may charge account against cgroup which has no
	 * tasks.
3376 3377 3378
	 * So, rmdir()->pre_destroy() can be called while we do this charge.
	 * In that case, we need to call pre_destroy() again. check it here.
	 */
3379
	cgroup_release_and_wakeup_rmdir(&memcg->css);
3380
}
3381

3382 3383 3384 3385 3386 3387 3388 3389
/*
 * At replace page cache, newpage is not under any memcg but it's on
 * LRU. So, this function doesn't touch res_counter but handles LRU
 * in correct way. Both pages are locked so we cannot race with uncharge.
 */
void mem_cgroup_replace_page_cache(struct page *oldpage,
				  struct page *newpage)
{
3390
	struct mem_cgroup *memcg = NULL;
3391 3392 3393 3394 3395 3396 3397 3398 3399
	struct page_cgroup *pc;
	enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;

	if (mem_cgroup_disabled())
		return;

	pc = lookup_page_cgroup(oldpage);
	/* fix accounting on old pages */
	lock_page_cgroup(pc);
3400 3401 3402 3403 3404
	if (PageCgroupUsed(pc)) {
		memcg = pc->mem_cgroup;
		mem_cgroup_charge_statistics(memcg, false, -1);
		ClearPageCgroupUsed(pc);
	}
3405 3406
	unlock_page_cgroup(pc);

3407 3408 3409 3410 3411 3412
	/*
	 * When called from shmem_replace_page(), in some cases the
	 * oldpage has already been charged, and in some cases not.
	 */
	if (!memcg)
		return;
3413 3414 3415 3416 3417
	/*
	 * Even if newpage->mapping was NULL before starting replacement,
	 * the newpage may be on LRU(or pagevec for LRU) already. We lock
	 * LRU while we overwrite pc->mem_cgroup.
	 */
3418
	__mem_cgroup_commit_charge(memcg, newpage, 1, type, true);
3419 3420
}

3421 3422 3423 3424 3425 3426
#ifdef CONFIG_DEBUG_VM
static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
{
	struct page_cgroup *pc;

	pc = lookup_page_cgroup(page);
3427 3428 3429 3430 3431
	/*
	 * Can be NULL while feeding pages into the page allocator for
	 * the first time, i.e. during boot or memory hotplug;
	 * or when mem_cgroup_disabled().
	 */
3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450
	if (likely(pc) && PageCgroupUsed(pc))
		return pc;
	return NULL;
}

bool mem_cgroup_bad_page_check(struct page *page)
{
	if (mem_cgroup_disabled())
		return false;

	return lookup_page_cgroup_used(page) != NULL;
}

void mem_cgroup_print_bad_page(struct page *page)
{
	struct page_cgroup *pc;

	pc = lookup_page_cgroup_used(page);
	if (pc) {
3451
		printk(KERN_ALERT "pc:%p pc->flags:%lx pc->mem_cgroup:%p\n",
3452 3453 3454 3455 3456
		       pc, pc->flags, pc->mem_cgroup);
	}
}
#endif

static DEFINE_MUTEX(set_limit_mutex);

static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
				unsigned long long val)
{
	int retry_count;
	u64 memswlimit, memlimit;
	int ret = 0;
	int children = mem_cgroup_count_children(memcg);
	u64 curusage, oldusage;
	int enlarge;

	/*
	 * For keeping hierarchical_reclaim simple, how long we should retry
	 * depends on the caller. We set our retry-count to be a function
	 * of the number of children which we should visit in this loop.
	 */
	retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;

	oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);

	enlarge = 0;
	while (retry_count) {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		/*
		 * Rather than hide it all in some function, do this in an
		 * open-coded manner so that what really happens is visible.
		 * We have to guarantee memcg->res.limit <= memcg->memsw.limit.
		 */
		mutex_lock(&set_limit_mutex);
		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
		if (memswlimit < val) {
			ret = -EINVAL;
			mutex_unlock(&set_limit_mutex);
			break;
		}

		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
		if (memlimit < val)
			enlarge = 1;

		ret = res_counter_set_limit(&memcg->res, val);
		if (!ret) {
			if (memswlimit == val)
				memcg->memsw_is_minimum = true;
			else
				memcg->memsw_is_minimum = false;
		}
		mutex_unlock(&set_limit_mutex);

		if (!ret)
			break;

		mem_cgroup_reclaim(memcg, GFP_KERNEL,
				   MEM_CGROUP_RECLAIM_SHRINK);
		curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
		/* Usage is reduced ? */
		if (curusage >= oldusage)
			retry_count--;
		else
			oldusage = curusage;
	}
	if (!ret && enlarge)
		memcg_oom_recover(memcg);

	return ret;
}
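
/*
 * Usage example (illustrative; the cgroupfs mount point below is an
 * assumption and varies by system). The resize above is what runs when
 * userspace writes the limit file:
 *
 *	# echo 512M > /sys/fs/cgroup/memory/grp/memory.limit_in_bytes
 *
 * The -EINVAL branch corresponds to requesting res.limit > memsw.limit,
 * e.g. asking for a 1G memory limit while memory.memsw.limit_in_bytes
 * is still 512M.
 */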

static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
					unsigned long long val)
{
	int retry_count;
	u64 memlimit, memswlimit, oldusage, curusage;
	int children = mem_cgroup_count_children(memcg);
	int ret = -EBUSY;
	int enlarge = 0;

	/* see mem_cgroup_resize_limit */
	retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
	oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
	while (retry_count) {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		/*
		 * Rather than hide it all in some function, do this in an
		 * open-coded manner so that what really happens is visible.
		 * We have to guarantee memcg->res.limit <= memcg->memsw.limit.
		 */
		mutex_lock(&set_limit_mutex);
		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
		if (memlimit > val) {
			ret = -EINVAL;
			mutex_unlock(&set_limit_mutex);
			break;
		}
		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
		if (memswlimit < val)
			enlarge = 1;
		ret = res_counter_set_limit(&memcg->memsw, val);
		if (!ret) {
			if (memlimit == val)
				memcg->memsw_is_minimum = true;
			else
				memcg->memsw_is_minimum = false;
		}
		mutex_unlock(&set_limit_mutex);

		if (!ret)
			break;

		mem_cgroup_reclaim(memcg, GFP_KERNEL,
				   MEM_CGROUP_RECLAIM_NOSWAP |
				   MEM_CGROUP_RECLAIM_SHRINK);
		curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
		/* Usage is reduced ? */
		if (curusage >= oldusage)
			retry_count--;
		else
			oldusage = curusage;
	}
	if (!ret && enlarge)
		memcg_oom_recover(memcg);
	return ret;
}

unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	unsigned long nr_reclaimed = 0;
	struct mem_cgroup_per_zone *mz, *next_mz = NULL;
	unsigned long reclaimed;
	int loop = 0;
	struct mem_cgroup_tree_per_zone *mctz;
	unsigned long long excess;
	unsigned long nr_scanned;

	if (order > 0)
		return 0;

	mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
	/*
	 * This loop can run a while, especially if mem_cgroups continuously
	 * keep exceeding their soft limit and putting the system under
	 * pressure
	 */
	do {
		if (next_mz)
			mz = next_mz;
		else
			mz = mem_cgroup_largest_soft_limit_node(mctz);
		if (!mz)
			break;

		nr_scanned = 0;
		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
						    gfp_mask, &nr_scanned);
		nr_reclaimed += reclaimed;
		*total_scanned += nr_scanned;
		spin_lock(&mctz->lock);

		/*
		 * If we failed to reclaim anything from this memory cgroup
		 * it is time to move on to the next cgroup
		 */
		next_mz = NULL;
		if (!reclaimed) {
			do {
				/*
				 * Loop until we find yet another one.
				 *
				 * By the time we get the soft_limit lock
				 * again, someone might have added the
				 * group back on the RB tree. Iterate to
				 * make sure we get a different mem.
				 * mem_cgroup_largest_soft_limit_node returns
				 * NULL if no other cgroup is present on
				 * the tree
				 */
				next_mz =
				__mem_cgroup_largest_soft_limit_node(mctz);
				if (next_mz == mz)
					css_put(&next_mz->memcg->css);
				else /* next_mz == NULL or other memcg */
					break;
			} while (1);
		}
		__mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
		excess = res_counter_soft_limit_excess(&mz->memcg->res);
		/*
		 * One school of thought says that we should not add
		 * back the node to the tree if reclaim returns 0.
		 * But our reclaim could return 0, simply because due
		 * to priority we are exposing a smaller subset of
		 * memory to reclaim from. Consider this as a longer
		 * term TODO.
		 */
		/* If excess == 0, no tree ops */
		__mem_cgroup_insert_exceeded(mz->memcg, mz, mctz, excess);
		spin_unlock(&mctz->lock);
		css_put(&mz->memcg->css);
		loop++;
		/*
		 * Could not reclaim anything and there are no more
		 * mem cgroups to try or we seem to be looping without
		 * reclaiming anything.
		 */
		if (!nr_reclaimed &&
			(next_mz == NULL ||
			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
			break;
	} while (!nr_reclaimed);
	if (next_mz)
		css_put(&next_mz->memcg->css);
	return nr_reclaimed;
}

/*
 * Traverse a specified page_cgroup list and try to drop them all.  This
 * doesn't reclaim the pages themselves - it just removes the page_cgroups.
 * Returns true if some page_cgroups were not freed, indicating that the
 * caller must retry this operation.
 */
static bool mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
				int node, int zid, enum lru_list lru)
{
	struct mem_cgroup_per_zone *mz;
	unsigned long flags, loop;
	struct list_head *list;
	struct page *busy;
	struct zone *zone;

	zone = &NODE_DATA(node)->node_zones[zid];
	mz = mem_cgroup_zoneinfo(memcg, node, zid);
	list = &mz->lruvec.lists[lru];

	loop = mz->lru_size[lru];
	/* give some margin against EBUSY etc...*/
	loop += 256;
	busy = NULL;
	while (loop--) {
		struct page_cgroup *pc;
		struct page *page;

		spin_lock_irqsave(&zone->lru_lock, flags);
		if (list_empty(list)) {
			spin_unlock_irqrestore(&zone->lru_lock, flags);
			break;
		}
		page = list_entry(list->prev, struct page, lru);
		if (busy == page) {
			list_move(&page->lru, list);
			busy = NULL;
			spin_unlock_irqrestore(&zone->lru_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&zone->lru_lock, flags);

		pc = lookup_page_cgroup(page);

		if (mem_cgroup_move_parent(page, pc, memcg)) {
			/* found lock contention or "pc" is obsolete. */
			busy = page;
			cond_resched();
		} else
			busy = NULL;
	}
	return !list_empty(list);
}

/*
 * Make the mem_cgroup's charge 0 if there are no tasks.
 * This enables deleting this mem_cgroup.
 */
static int mem_cgroup_force_empty(struct mem_cgroup *memcg, bool free_all)
{
	int ret;
	int node, zid, shrink;
	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct cgroup *cgrp = memcg->css.cgroup;

	css_get(&memcg->css);

	shrink = 0;
	/* should free all ? */
	if (free_all)
		goto try_to_free;
move_account:
	do {
		ret = -EBUSY;
		if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
			goto out;
		/* This is for making sure all *used* pages are on the LRU. */
		lru_add_drain_all();
		drain_all_stock_sync(memcg);
		ret = 0;
		mem_cgroup_start_move(memcg);
		for_each_node_state(node, N_HIGH_MEMORY) {
			for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
				enum lru_list lru;
				for_each_lru(lru) {
					ret = mem_cgroup_force_empty_list(memcg,
							node, zid, lru);
					if (ret)
						break;
				}
			}
			if (ret)
				break;
		}
		mem_cgroup_end_move(memcg);
		memcg_oom_recover(memcg);
		cond_resched();
	/* "ret" should also be checked to ensure all lists are empty. */
	} while (res_counter_read_u64(&memcg->res, RES_USAGE) > 0 || ret);
out:
	css_put(&memcg->css);
	return ret;

try_to_free:
	/* returns EBUSY if there is a task or if we come here twice. */
	if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
		ret = -EBUSY;
		goto out;
	}
	/* we call try-to-free pages to make this cgroup empty */
	lru_add_drain_all();
	/* try to free all pages in this cgroup */
	shrink = 1;
	while (nr_retries && res_counter_read_u64(&memcg->res, RES_USAGE) > 0) {
		int progress;

		if (signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL,
						false);
		if (!progress) {
			nr_retries--;
			/* maybe some writeback is necessary */
			congestion_wait(BLK_RW_ASYNC, HZ/10);
		}

	}
	lru_add_drain();
	/* try move_account...there may be some *locked* pages. */
	goto move_account;
}

static int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
{
	return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
}
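
/*
 * Usage example (illustrative; the mount point is an assumption): any
 * write to memory.force_empty triggers the handler above, e.g.
 *
 *	# echo 0 > /sys/fs/cgroup/memory/grp/memory.force_empty
 *
 * which tries to reclaim the group's remaining charges or move them to
 * the parent, so the now-empty group can be removed with rmdir.
 */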


static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
{
	return mem_cgroup_from_cont(cont)->use_hierarchy;
}

static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
					u64 val)
{
	int retval = 0;
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
	struct cgroup *parent = cont->parent;
	struct mem_cgroup *parent_memcg = NULL;

	if (parent)
		parent_memcg = mem_cgroup_from_cont(parent);

	cgroup_lock();

	if (memcg->use_hierarchy == val)
		goto out;

	/*
	 * If parent's use_hierarchy is set, we can't make any modifications
	 * in the child subtrees. If it is unset, then the change can
	 * occur, provided the current cgroup has no children.
	 *
	 * For the root cgroup, parent_memcg is NULL, so we allow the value
	 * to be set if there are no children.
	 */
	if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
				(val == 1 || val == 0)) {
		if (list_empty(&cont->children))
			memcg->use_hierarchy = val;
		else
			retval = -EBUSY;
	} else
		retval = -EINVAL;

out:
	cgroup_unlock();

	return retval;
}

static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg,
					       enum mem_cgroup_stat_index idx)
{
	struct mem_cgroup *iter;
	long val = 0;

	/* Per-cpu values can be negative, use a signed accumulator */
	for_each_mem_cgroup_tree(iter, memcg)
		val += mem_cgroup_read_stat(iter, idx);

	if (val < 0) /* race ? */
		val = 0;
	return val;
}

static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
{
	u64 val;

	if (!mem_cgroup_is_root(memcg)) {
		if (!swap)
			return res_counter_read_u64(&memcg->res, RES_USAGE);
		else
			return res_counter_read_u64(&memcg->memsw, RES_USAGE);
	}

	val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE);
	val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS);

	if (swap)
		val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAP);

	return val << PAGE_SHIFT;
}

static ssize_t mem_cgroup_read(struct cgroup *cont, struct cftype *cft,
			       struct file *file, char __user *buf,
			       size_t nbytes, loff_t *ppos)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
	char str[64];
	u64 val;
	int type, name, len;

	type = MEMFILE_TYPE(cft->private);
	name = MEMFILE_ATTR(cft->private);

	if (!do_swap_account && type == _MEMSWAP)
		return -EOPNOTSUPP;

	switch (type) {
	case _MEM:
		if (name == RES_USAGE)
			val = mem_cgroup_usage(memcg, false);
		else
			val = res_counter_read_u64(&memcg->res, name);
		break;
	case _MEMSWAP:
		if (name == RES_USAGE)
			val = mem_cgroup_usage(memcg, true);
		else
			val = res_counter_read_u64(&memcg->memsw, name);
		break;
	default:
		BUG();
	}

	len = scnprintf(str, sizeof(str), "%llu\n", (unsigned long long)val);
	return simple_read_from_buffer(buf, nbytes, ppos, str, len);
}
/*
 * The user of this function is...
 * RES_LIMIT.
 */
static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
			    const char *buffer)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
	int type, name;
	unsigned long long val;
	int ret;

	type = MEMFILE_TYPE(cft->private);
	name = MEMFILE_ATTR(cft->private);

	if (!do_swap_account && type == _MEMSWAP)
		return -EOPNOTSUPP;

	switch (name) {
	case RES_LIMIT:
		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
			ret = -EINVAL;
			break;
		}
		/* This function does all necessary parse...reuse it */
		ret = res_counter_memparse_write_strategy(buffer, &val);
		if (ret)
			break;
		if (type == _MEM)
			ret = mem_cgroup_resize_limit(memcg, val);
		else
			ret = mem_cgroup_resize_memsw_limit(memcg, val);
		break;
	case RES_SOFT_LIMIT:
		ret = res_counter_memparse_write_strategy(buffer, &val);
		if (ret)
			break;
		/*
		 * For memsw, soft limits are hard to implement in terms
		 * of semantics, for now, we support soft limits for
		 * control without swap
		 */
		if (type == _MEM)
			ret = res_counter_set_soft_limit(&memcg->res, val);
		else
			ret = -EINVAL;
		break;
	default:
		ret = -EINVAL; /* should be BUG() ? */
		break;
	}
	return ret;
}
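
/*
 * Usage example (illustrative; the cgroupfs mount point is an assumption
 * and varies by system). The buffer is parsed by
 * res_counter_memparse_write_strategy(), so the usual memparse suffixes
 * (K, M, G, ...) are accepted:
 *
 *	# echo 256M > /sys/fs/cgroup/memory/grp/memory.soft_limit_in_bytes
 *	# echo 1G   > /sys/fs/cgroup/memory/grp/memory.limit_in_bytes
 *
 * Writing a soft limit to the memsw counter fails with -EINVAL, as handled
 * in the RES_SOFT_LIMIT case above.
 */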

static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
		unsigned long long *mem_limit, unsigned long long *memsw_limit)
{
	struct cgroup *cgroup;
	unsigned long long min_limit, min_memsw_limit, tmp;

	min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
	min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
	cgroup = memcg->css.cgroup;
	if (!memcg->use_hierarchy)
		goto out;

	while (cgroup->parent) {
		cgroup = cgroup->parent;
		memcg = mem_cgroup_from_cont(cgroup);
		if (!memcg->use_hierarchy)
			break;
		tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
		min_limit = min(min_limit, tmp);
		tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
		min_memsw_limit = min(min_memsw_limit, tmp);
	}
out:
	*mem_limit = min_limit;
	*memsw_limit = min_memsw_limit;
}
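
/*
 * Worked example (illustrative): with use_hierarchy enabled, if a parent
 * has memory.limit_in_bytes = 1G and the child's own limit is 2G, the loop
 * above walks up the tree and reports hierarchical_memory_limit = 1G for
 * the child, i.e. the minimum limit along the path to the root.
 */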

static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
	int type, name;

	type = MEMFILE_TYPE(event);
	name = MEMFILE_ATTR(event);

	if (!do_swap_account && type == _MEMSWAP)
		return -EOPNOTSUPP;

	switch (name) {
	case RES_MAX_USAGE:
		if (type == _MEM)
			res_counter_reset_max(&memcg->res);
		else
			res_counter_reset_max(&memcg->memsw);
		break;
	case RES_FAILCNT:
		if (type == _MEM)
			res_counter_reset_failcnt(&memcg->res);
		else
			res_counter_reset_failcnt(&memcg->memsw);
		break;
	}

	return 0;
}

static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
					struct cftype *cft)
{
	return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
}

#ifdef CONFIG_MMU
static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
					struct cftype *cft, u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);

	if (val >= (1 << NR_MOVE_TYPE))
		return -EINVAL;
	/*
	 * We check this value several times in both can_attach() and
	 * attach(), so we need cgroup lock to prevent this value from being
	 * inconsistent.
	 */
	cgroup_lock();
	memcg->move_charge_at_immigrate = val;
	cgroup_unlock();

	return 0;
}
#else
static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
					struct cftype *cft, u64 val)
{
	return -ENOSYS;
}
#endif
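
/*
 * Usage example (illustrative; the mount point is an assumption):
 * move_charge_at_immigrate is a bitmask of NR_MOVE_TYPE bits written
 * through cgroupfs, e.g.
 *
 *	# echo 1 > /sys/fs/cgroup/memory/grp/memory.move_charge_at_immigrate
 *
 * selects moving the charges of a task's anonymous pages when it migrates
 * into the group (see the move_anon()/move_file() checks used by
 * get_mctgt_type() further below).
 */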

#ifdef CONFIG_NUMA
static int memcg_numa_stat_show(struct cgroup *cont, struct cftype *cft,
				      struct seq_file *m)
{
	int nid;
	unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
	unsigned long node_nr;
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);

	total_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL);
	seq_printf(m, "total=%lu", total_nr);
	for_each_node_state(nid, N_HIGH_MEMORY) {
		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL);
		seq_printf(m, " N%d=%lu", nid, node_nr);
	}
	seq_putc(m, '\n');

	file_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_FILE);
	seq_printf(m, "file=%lu", file_nr);
	for_each_node_state(nid, N_HIGH_MEMORY) {
		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
				LRU_ALL_FILE);
		seq_printf(m, " N%d=%lu", nid, node_nr);
	}
	seq_putc(m, '\n');

	anon_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_ANON);
	seq_printf(m, "anon=%lu", anon_nr);
	for_each_node_state(nid, N_HIGH_MEMORY) {
		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
				LRU_ALL_ANON);
		seq_printf(m, " N%d=%lu", nid, node_nr);
	}
	seq_putc(m, '\n');

	unevictable_nr = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_UNEVICTABLE));
	seq_printf(m, "unevictable=%lu", unevictable_nr);
	for_each_node_state(nid, N_HIGH_MEMORY) {
		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
				BIT(LRU_UNEVICTABLE));
		seq_printf(m, " N%d=%lu", nid, node_nr);
	}
	seq_putc(m, '\n');
	return 0;
}
#endif /* CONFIG_NUMA */
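
/*
 * Illustrative output of memory.numa_stat produced by the function above
 * (the numbers are made up):
 *
 *	total=1245 N0=800 N1=445
 *	file=978 N0=600 N1=378
 *	anon=250 N0=190 N1=60
 *	unevictable=17 N0=10 N1=7
 *
 * Each line reports the per-memcg LRU page count and its per-node split.
 */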

static const char * const mem_cgroup_lru_names[] = {
	"inactive_anon",
	"active_anon",
	"inactive_file",
	"active_file",
	"unevictable",
};

static inline void mem_cgroup_lru_names_not_uptodate(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
}

static int memcg_stat_show(struct cgroup *cont, struct cftype *cft,
				 struct seq_file *m)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
	struct mem_cgroup *mi;
	unsigned int i;

	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
		if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
			continue;
		seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i],
			   mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
	}

	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
		seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
			   mem_cgroup_read_events(memcg, i));

	for (i = 0; i < NR_LRU_LISTS; i++)
		seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
			   mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);

	/* Hierarchical information */
	{
		unsigned long long limit, memsw_limit;
		memcg_get_hierarchical_limit(memcg, &limit, &memsw_limit);
		seq_printf(m, "hierarchical_memory_limit %llu\n", limit);
		if (do_swap_account)
			seq_printf(m, "hierarchical_memsw_limit %llu\n",
				   memsw_limit);
	}

	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
		long long val = 0;

		if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
			continue;
		for_each_mem_cgroup_tree(mi, memcg)
			val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
		seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val);
	}

	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
		unsigned long long val = 0;

		for_each_mem_cgroup_tree(mi, memcg)
			val += mem_cgroup_read_events(mi, i);
		seq_printf(m, "total_%s %llu\n",
			   mem_cgroup_events_names[i], val);
	}

	for (i = 0; i < NR_LRU_LISTS; i++) {
		unsigned long long val = 0;

		for_each_mem_cgroup_tree(mi, memcg)
			val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
		seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
	}

#ifdef CONFIG_DEBUG_VM
	{
		int nid, zid;
		struct mem_cgroup_per_zone *mz;
		struct zone_reclaim_stat *rstat;
		unsigned long recent_rotated[2] = {0, 0};
		unsigned long recent_scanned[2] = {0, 0};

		for_each_online_node(nid)
			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
				mz = mem_cgroup_zoneinfo(memcg, nid, zid);
				rstat = &mz->lruvec.reclaim_stat;

				recent_rotated[0] += rstat->recent_rotated[0];
				recent_rotated[1] += rstat->recent_rotated[1];
				recent_scanned[0] += rstat->recent_scanned[0];
				recent_scanned[1] += rstat->recent_scanned[1];
			}
		seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
		seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
		seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
		seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
	}
#endif

	return 0;
}

static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);

	return mem_cgroup_swappiness(memcg);
}

static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
				       u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
	struct mem_cgroup *parent;

	if (val > 100)
		return -EINVAL;

	if (cgrp->parent == NULL)
		return -EINVAL;

	parent = mem_cgroup_from_cont(cgrp->parent);

	cgroup_lock();

	/* If under hierarchy, only empty-root can set this value */
	if ((parent->use_hierarchy) ||
	    (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
		cgroup_unlock();
		return -EINVAL;
	}

	memcg->swappiness = val;

	cgroup_unlock();

	return 0;
}
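
/*
 * Usage example (illustrative; the mount point is an assumption): the
 * per-memcg swappiness overrides the global vm.swappiness for reclaim
 * inside this group:
 *
 *	# echo 10 > /sys/fs/cgroup/memory/grp/memory.swappiness
 *
 * As enforced above, the value must be 0-100, cannot be set on the root
 * cgroup, and cannot be changed under a hierarchy once children exist.
 */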

static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
{
	struct mem_cgroup_threshold_ary *t;
	u64 usage;
	int i;

	rcu_read_lock();
	if (!swap)
		t = rcu_dereference(memcg->thresholds.primary);
	else
		t = rcu_dereference(memcg->memsw_thresholds.primary);

	if (!t)
		goto unlock;

	usage = mem_cgroup_usage(memcg, swap);

	/*
	 * current_threshold points to threshold just below or equal to usage.
	 * If it's not true, a threshold was crossed after last
	 * call of __mem_cgroup_threshold().
	 */
	i = t->current_threshold;

	/*
	 * Iterate backward over array of thresholds starting from
	 * current_threshold and check if a threshold is crossed.
	 * If none of thresholds below usage is crossed, we read
	 * only one element of the array here.
	 */
	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
		eventfd_signal(t->entries[i].eventfd, 1);

	/* i = current_threshold + 1 */
	i++;

	/*
	 * Iterate forward over array of thresholds starting from
	 * current_threshold+1 and check if a threshold is crossed.
	 * If none of thresholds above usage is crossed, we read
	 * only one element of the array here.
	 */
	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
		eventfd_signal(t->entries[i].eventfd, 1);

	/* Update current_threshold */
	t->current_threshold = i - 1;
unlock:
	rcu_read_unlock();
}

static void mem_cgroup_threshold(struct mem_cgroup *memcg)
{
	while (memcg) {
		__mem_cgroup_threshold(memcg, false);
		if (do_swap_account)
			__mem_cgroup_threshold(memcg, true);

		memcg = parent_mem_cgroup(memcg);
	}
}

static int compare_thresholds(const void *a, const void *b)
{
	const struct mem_cgroup_threshold *_a = a;
	const struct mem_cgroup_threshold *_b = b;

	return _a->threshold - _b->threshold;
}

static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
{
	struct mem_cgroup_eventfd_list *ev;

	list_for_each_entry(ev, &memcg->oom_notify, list)
		eventfd_signal(ev->eventfd, 1);
	return 0;
}

static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg)
		mem_cgroup_oom_notify_cb(iter);
}

static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
	struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
4349 4350
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4351 4352
	struct mem_cgroup_thresholds *thresholds;
	struct mem_cgroup_threshold_ary *new;
4353 4354
	int type = MEMFILE_TYPE(cft->private);
	u64 threshold, usage;
4355
	int i, size, ret;
4356 4357 4358 4359 4360 4361

	ret = res_counter_memparse_write_strategy(args, &threshold);
	if (ret)
		return ret;

	mutex_lock(&memcg->thresholds_lock);
4362

4363
	if (type == _MEM)
4364
		thresholds = &memcg->thresholds;
4365
	else if (type == _MEMSWAP)
4366
		thresholds = &memcg->memsw_thresholds;
4367 4368 4369 4370 4371 4372
	else
		BUG();

	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);

	/* Check if a threshold crossed before adding a new one */
4373
	if (thresholds->primary)
4374 4375
		__mem_cgroup_threshold(memcg, type == _MEMSWAP);

4376
	size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4377 4378

	/* Allocate memory for new array of thresholds */
4379
	new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
4380
			GFP_KERNEL);
4381
	if (!new) {
4382 4383 4384
		ret = -ENOMEM;
		goto unlock;
	}
4385
	new->size = size;
4386 4387

	/* Copy thresholds (if any) to new array */
4388 4389
	if (thresholds->primary) {
		memcpy(new->entries, thresholds->primary->entries, (size - 1) *
4390
				sizeof(struct mem_cgroup_threshold));
4391 4392
	}

4393
	/* Add new threshold */
4394 4395
	new->entries[size - 1].eventfd = eventfd;
	new->entries[size - 1].threshold = threshold;
4396 4397

	/* Sort thresholds. Registering of new threshold isn't time-critical */
4398
	sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
4399 4400 4401
			compare_thresholds, NULL);

	/* Find current threshold */
4402
	new->current_threshold = -1;
4403
	for (i = 0; i < size; i++) {
4404
		if (new->entries[i].threshold <= usage) {
4405
			/*
4406 4407
			 * new->current_threshold will not be used until
			 * rcu_assign_pointer(), so it's safe to increment
4408 4409
			 * it here.
			 */
4410
			++new->current_threshold;
4411 4412
		} else
			break;
4413 4414
	}

4415 4416 4417 4418 4419
	/* Free old spare buffer and save old primary buffer as spare */
	kfree(thresholds->spare);
	thresholds->spare = thresholds->primary;

	rcu_assign_pointer(thresholds->primary, new);
4420

4421
	/* To be sure that nobody uses thresholds */
4422 4423 4424 4425 4426 4427 4428 4429
	synchronize_rcu();

unlock:
	mutex_unlock(&memcg->thresholds_lock);

	return ret;
}
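
/*
 * Usage sketch (illustrative only, based on the documented cgroup eventfd
 * interface; names and descriptors below are assumptions, not code from
 * this file). A threshold is registered from userspace through
 * cgroup.event_control:
 *
 *	efd = eventfd(0, 0);
 *	ufd = open(".../memory.usage_in_bytes", O_RDONLY);
 *	dprintf(cfd, "%d %d 50M", efd, ufd);	// cfd: cgroup.event_control
 *	read(efd, &cnt, 8);			// wakes up when 50M is crossed
 *
 * The "50M" argument is what res_counter_memparse_write_strategy() parses
 * in the register handler above.
 */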

static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
	struct cftype *cft, struct eventfd_ctx *eventfd)
4432 4433
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4434 4435
	struct mem_cgroup_thresholds *thresholds;
	struct mem_cgroup_threshold_ary *new;
4436 4437
	int type = MEMFILE_TYPE(cft->private);
	u64 usage;
4438
	int i, j, size;
4439 4440 4441

	mutex_lock(&memcg->thresholds_lock);
	if (type == _MEM)
4442
		thresholds = &memcg->thresholds;
4443
	else if (type == _MEMSWAP)
4444
		thresholds = &memcg->memsw_thresholds;
4445 4446 4447
	else
		BUG();

4448 4449 4450
	if (!thresholds->primary)
		goto unlock;

4451 4452 4453 4454 4455 4456
	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);

	/* Check if a threshold crossed before removing */
	__mem_cgroup_threshold(memcg, type == _MEMSWAP);

	/* Calculate new number of threshold */
4457 4458 4459
	size = 0;
	for (i = 0; i < thresholds->primary->size; i++) {
		if (thresholds->primary->entries[i].eventfd != eventfd)
4460 4461 4462
			size++;
	}

4463
	new = thresholds->spare;
4464

4465 4466
	/* Set thresholds array to NULL if we don't have thresholds */
	if (!size) {
4467 4468
		kfree(new);
		new = NULL;
4469
		goto swap_buffers;
4470 4471
	}

4472
	new->size = size;
4473 4474

	/* Copy thresholds and find current threshold */
4475 4476 4477
	new->current_threshold = -1;
	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
		if (thresholds->primary->entries[i].eventfd == eventfd)
4478 4479
			continue;

4480
		new->entries[j] = thresholds->primary->entries[i];
4481
		if (new->entries[j].threshold <= usage) {
4482
			/*
4483
			 * new->current_threshold will not be used
4484 4485 4486
			 * until rcu_assign_pointer(), so it's safe to increment
			 * it here.
			 */
4487
			++new->current_threshold;
4488 4489 4490 4491
		}
		j++;
	}

4492
swap_buffers:
4493 4494
	/* Swap primary and spare array */
	thresholds->spare = thresholds->primary;
4495 4496 4497 4498 4499 4500
	/* If all events are unregistered, free the spare array */
	if (!new) {
		kfree(thresholds->spare);
		thresholds->spare = NULL;
	}

4501
	rcu_assign_pointer(thresholds->primary, new);
4502

4503
	/* To be sure that nobody uses thresholds */
4504
	synchronize_rcu();
4505
unlock:
4506 4507
	mutex_unlock(&memcg->thresholds_lock);
}

static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
	struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
	struct mem_cgroup_eventfd_list *event;
	int type = MEMFILE_TYPE(cft->private);

	BUG_ON(type != _OOM_TYPE);
	event = kmalloc(sizeof(*event),	GFP_KERNEL);
	if (!event)
		return -ENOMEM;

4521
	spin_lock(&memcg_oom_lock);

	event->eventfd = eventfd;
	list_add(&event->list, &memcg->oom_notify);

	/* already in OOM ? */
4527
	if (atomic_read(&memcg->under_oom))
		eventfd_signal(eventfd, 1);
4529
	spin_unlock(&memcg_oom_lock);

	return 0;
}

4534
static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
	struct cftype *cft, struct eventfd_ctx *eventfd)
{
4537
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
	struct mem_cgroup_eventfd_list *ev, *tmp;
	int type = MEMFILE_TYPE(cft->private);

	BUG_ON(type != _OOM_TYPE);

4543
	spin_lock(&memcg_oom_lock);

4545
	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
		if (ev->eventfd == eventfd) {
			list_del(&ev->list);
			kfree(ev);
		}
	}

4552
	spin_unlock(&memcg_oom_lock);
}

4555 4556 4557
static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
	struct cftype *cft,  struct cgroup_map_cb *cb)
{
4558
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4559

4560
	cb->fill(cb, "oom_kill_disable", memcg->oom_kill_disable);
4561

4562
	if (atomic_read(&memcg->under_oom))
4563 4564 4565 4566 4567 4568 4569 4570 4571
		cb->fill(cb, "under_oom", 1);
	else
		cb->fill(cb, "under_oom", 0);
	return 0;
}

static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
	struct cftype *cft, u64 val)
{
4572
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4573 4574 4575 4576 4577 4578 4579 4580 4581 4582 4583
	struct mem_cgroup *parent;

	/* cannot set to root cgroup and only 0 and 1 are allowed */
	if (!cgrp->parent || !((val == 0) || (val == 1)))
		return -EINVAL;

	parent = mem_cgroup_from_cont(cgrp->parent);

	cgroup_lock();
	/* oom-kill-disable is a flag for subhierarchy. */
	if ((parent->use_hierarchy) ||
4584
	    (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
4585 4586 4587
		cgroup_unlock();
		return -EINVAL;
	}
4588
	memcg->oom_kill_disable = val;
4589
	if (!val)
4590
		memcg_oom_recover(memcg);
4591 4592 4593 4594
	cgroup_unlock();
	return 0;
}
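
/*
 * Usage example (illustrative; the mount point is an assumption): writing 1
 * to memory.oom_control sets oom_kill_disable, so tasks hitting the limit
 * wait instead of being OOM-killed, and reading the file reports the state:
 *
 *	# echo 1 > /sys/fs/cgroup/memory/grp/memory.oom_control
 *	# cat /sys/fs/cgroup/memory/grp/memory.oom_control
 *	oom_kill_disable 1
 *	under_oom 0
 *
 * As enforced above, this cannot be set on the root cgroup and, under a
 * hierarchy, only while the group has no children.
 */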

#ifdef CONFIG_MEMCG_KMEM
static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
	return mem_cgroup_sockets_init(memcg, ss);
};

static void kmem_cgroup_destroy(struct mem_cgroup *memcg)
{
	mem_cgroup_sockets_destroy(memcg);
}
#else
static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
	return 0;
}

static void kmem_cgroup_destroy(struct mem_cgroup *memcg)
{
}
#endif

static struct cftype mem_cgroup_files[] = {
	{
		.name = "usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
		.read = mem_cgroup_read,
		.register_event = mem_cgroup_usage_register_event,
		.unregister_event = mem_cgroup_usage_unregister_event,
	},
	{
		.name = "max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
		.trigger = mem_cgroup_reset,
		.read = mem_cgroup_read,
	},
	{
		.name = "limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
		.write_string = mem_cgroup_write,
		.read = mem_cgroup_read,
	},
	{
		.name = "soft_limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
		.write_string = mem_cgroup_write,
		.read = mem_cgroup_read,
	},
	{
		.name = "failcnt",
		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
		.trigger = mem_cgroup_reset,
		.read = mem_cgroup_read,
	},
	{
		.name = "stat",
		.read_seq_string = memcg_stat_show,
	},
	{
		.name = "force_empty",
		.trigger = mem_cgroup_force_empty_write,
	},
	{
		.name = "use_hierarchy",
		.write_u64 = mem_cgroup_hierarchy_write,
		.read_u64 = mem_cgroup_hierarchy_read,
	},
	{
		.name = "swappiness",
		.read_u64 = mem_cgroup_swappiness_read,
		.write_u64 = mem_cgroup_swappiness_write,
	},
	{
		.name = "move_charge_at_immigrate",
		.read_u64 = mem_cgroup_move_charge_read,
		.write_u64 = mem_cgroup_move_charge_write,
	},
	{
		.name = "oom_control",
		.read_map = mem_cgroup_oom_control_read,
		.write_u64 = mem_cgroup_oom_control_write,
		.register_event = mem_cgroup_oom_register_event,
		.unregister_event = mem_cgroup_oom_unregister_event,
		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
	},
#ifdef CONFIG_NUMA
	{
		.name = "numa_stat",
		.read_seq_string = memcg_numa_stat_show,
	},
#endif
#ifdef CONFIG_MEMCG_SWAP
	{
		.name = "memsw.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
		.read = mem_cgroup_read,
		.register_event = mem_cgroup_usage_register_event,
		.unregister_event = mem_cgroup_usage_unregister_event,
	},
	{
		.name = "memsw.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
		.trigger = mem_cgroup_reset,
		.read = mem_cgroup_read,
	},
	{
		.name = "memsw.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
		.write_string = mem_cgroup_write,
		.read = mem_cgroup_read,
	},
	{
		.name = "memsw.failcnt",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
		.trigger = mem_cgroup_reset,
		.read = mem_cgroup_read,
	},
#endif
	{ },	/* terminate */
};

static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
4716 4717
{
	struct mem_cgroup_per_node *pn;
4718
	struct mem_cgroup_per_zone *mz;
4719
	int zone, tmp = node;
4720 4721 4722 4723 4724 4725 4726 4727
	/*
	 * This routine is called against all possible nodes.
	 * But it's a BUG to call kmalloc() against an offline node.
	 *
	 * TODO: this routine can waste much memory for nodes which will
	 *       never be onlined. It's better to use memory hotplug callback
	 *       function.
	 */
4728 4729
	if (!node_state(node, N_NORMAL_MEMORY))
		tmp = -1;
4730
	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
4731 4732
	if (!pn)
		return 1;
4733 4734 4735

	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
		mz = &pn->zoneinfo[zone];
4736
		lruvec_init(&mz->lruvec, &NODE_DATA(node)->node_zones[zone]);
4737
		mz->usage_in_excess = 0;
4738
		mz->on_tree = false;
4739
		mz->memcg = memcg;
4740
	}
4741
	memcg->info.nodeinfo[node] = pn;
4742 4743 4744
	return 0;
}

4745
static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
4746
{
4747
	kfree(memcg->info.nodeinfo[node]);
4748 4749
}

4750 4751
static struct mem_cgroup *mem_cgroup_alloc(void)
{
4752
	struct mem_cgroup *memcg;
4753
	int size = sizeof(struct mem_cgroup);
4754

4755
	/* Can be very big if MAX_NUMNODES is very big */
4756
	if (size < PAGE_SIZE)
4757
		memcg = kzalloc(size, GFP_KERNEL);
4758
	else
4759
		memcg = vzalloc(size);
4760

4761
	if (!memcg)
4762 4763
		return NULL;

4764 4765
	memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
	if (!memcg->stat)
4766
		goto out_free;
4767 4768
	spin_lock_init(&memcg->pcp_counter_lock);
	return memcg;
4769 4770 4771

out_free:
	if (size < PAGE_SIZE)
4772
		kfree(memcg);
4773
	else
4774
		vfree(memcg);
4775
	return NULL;
4776 4777
}

4778
/*
4779
 * Helpers for freeing a kmalloc()ed/vzalloc()ed mem_cgroup by RCU,
4780 4781 4782
 * but in process context.  The work_freeing structure is overlaid
 * on the rcu_freeing structure, which itself is overlaid on memsw.
 */
4783
static void free_work(struct work_struct *work)
4784 4785
{
	struct mem_cgroup *memcg;
4786
	int size = sizeof(struct mem_cgroup);
4787 4788

	memcg = container_of(work, struct mem_cgroup, work_freeing);
4789 4790 4791 4792 4793 4794 4795 4796 4797 4798 4799 4800
	/*
	 * We need to make sure that (at least for now), the jump label
	 * destruction code runs outside of the cgroup lock. This is because
	 * get_online_cpus(), which is called from the static_branch update,
	 * can't be called inside the cgroup_lock. cpusets are the ones
	 * enforcing this dependency, so if they ever change, we might as well.
	 *
	 * schedule_work() will guarantee this happens. Be careful if you need
	 * to move this code around, and make sure it is outside
	 * the cgroup_lock.
	 */
	disarm_sock_keys(memcg);
4801 4802 4803 4804
	if (size < PAGE_SIZE)
		kfree(memcg);
	else
		vfree(memcg);
4805
}
4806 4807

static void free_rcu(struct rcu_head *rcu_head)
4808 4809 4810 4811
{
	struct mem_cgroup *memcg;

	memcg = container_of(rcu_head, struct mem_cgroup, rcu_freeing);
4812
	INIT_WORK(&memcg->work_freeing, free_work);
4813 4814 4815
	schedule_work(&memcg->work_freeing);
}

4816 4817 4818 4819 4820 4821 4822 4823 4824 4825 4826
/*
 * When destroying a mem_cgroup, references from swap_cgroup can remain.
 * (scanning all of them at force_empty is too costly...)
 *
 * Instead of clearing all references at force_empty, we remember
 * the number of references from swap_cgroup and free the mem_cgroup when
 * it goes down to 0.
 *
 * Removal of cgroup itself succeeds regardless of refs from swap.
 */

4827
static void __mem_cgroup_free(struct mem_cgroup *memcg)
4828
{
	int node;

4831 4832
	mem_cgroup_remove_from_trees(memcg);
	free_css_id(&mem_cgroup_subsys, &memcg->css);

	for_each_node(node)
4835
		free_mem_cgroup_per_zone_info(memcg, node);

4837
	free_percpu(memcg->stat);
4838
	call_rcu(&memcg->rcu_freeing, free_rcu);
4839 4840
}

4841
static void mem_cgroup_get(struct mem_cgroup *memcg)
4842
{
4843
	atomic_inc(&memcg->refcnt);
4844 4845
}

4846
static void __mem_cgroup_put(struct mem_cgroup *memcg, int count)
4847
{
4848 4849 4850
	if (atomic_sub_and_test(count, &memcg->refcnt)) {
		struct mem_cgroup *parent = parent_mem_cgroup(memcg);
		__mem_cgroup_free(memcg);
4851 4852 4853
		if (parent)
			mem_cgroup_put(parent);
	}
4854 4855
}

4856
static void mem_cgroup_put(struct mem_cgroup *memcg)
4857
{
4858
	__mem_cgroup_put(memcg, 1);
4859 4860
}

4861 4862 4863
/*
 * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled.
 */
struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
4865
{
4866
	if (!memcg->res.parent)
4867
		return NULL;
4868
	return mem_cgroup_from_res_counter(memcg->res.parent, res);
4869
}
EXPORT_SYMBOL(parent_mem_cgroup);
4871

#ifdef CONFIG_MEMCG_SWAP
4873 4874
static void __init enable_swap_cgroup(void)
{
4875
	if (!mem_cgroup_disabled() && really_do_swap_account)
4876 4877 4878 4879 4880 4881 4882 4883
		do_swap_account = 1;
}
#else
static void __init enable_swap_cgroup(void)
{
}
#endif

4884 4885 4886 4887 4888 4889
static int mem_cgroup_soft_limit_tree_init(void)
{
	struct mem_cgroup_tree_per_node *rtpn;
	struct mem_cgroup_tree_per_zone *rtpz;
	int tmp, node, zone;

	for_each_node(node) {
4891 4892 4893 4894 4895
		tmp = node;
		if (!node_state(node, N_NORMAL_MEMORY))
			tmp = -1;
		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
		if (!rtpn)
4896
			goto err_cleanup;
4897 4898 4899 4900 4901 4902 4903 4904 4905 4906

		soft_limit_tree.rb_tree_per_node[node] = rtpn;

		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
			rtpz = &rtpn->rb_tree_per_zone[zone];
			rtpz->rb_root = RB_ROOT;
			spin_lock_init(&rtpz->lock);
		}
	}
	return 0;
4907 4908

err_cleanup:
	for_each_node(node) {
4910 4911 4912 4913 4914 4915 4916
		if (!soft_limit_tree.rb_tree_per_node[node])
			break;
		kfree(soft_limit_tree.rb_tree_per_node[node]);
		soft_limit_tree.rb_tree_per_node[node] = NULL;
	}
	return 1;

4917 4918
}

static struct cgroup_subsys_state * __ref
4920
mem_cgroup_create(struct cgroup *cont)
B
4922
	struct mem_cgroup *memcg, *parent;
K
4924
	int node;
B
4926 4927
	memcg = mem_cgroup_alloc();
	if (!memcg)
K
KAMEZAWA Hiroyuki 已提交
4928
		return ERR_PTR(error);
4929

B
Bob Liu 已提交
4930
	for_each_node(node)
4931
		if (alloc_mem_cgroup_per_zone_info(memcg, node))
4932
			goto free_out;
4933

4934
	/* root ? */
4935
	if (cont->parent == NULL) {
4936
		int cpu;
4937
		enable_swap_cgroup();
4938
		parent = NULL;
4939 4940
		if (mem_cgroup_soft_limit_tree_init())
			goto free_out;
4941
		root_mem_cgroup = memcg;
4942 4943 4944 4945 4946
		for_each_possible_cpu(cpu) {
			struct memcg_stock_pcp *stock =
						&per_cpu(memcg_stock, cpu);
			INIT_WORK(&stock->work, drain_local_stock);
		}
4947
		hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
4948
	} else {
4949
		parent = mem_cgroup_from_cont(cont->parent);
4950 4951
		memcg->use_hierarchy = parent->use_hierarchy;
		memcg->oom_kill_disable = parent->oom_kill_disable;
4952
	}
4953

4954
	if (parent && parent->use_hierarchy) {
4955 4956
		res_counter_init(&memcg->res, &parent->res);
		res_counter_init(&memcg->memsw, &parent->memsw);
4957 4958 4959 4960 4961 4962 4963
		/*
		 * We increment refcnt of the parent to ensure that we can
		 * safely access it on res_counter_charge/uncharge.
		 * This refcnt will be decremented when freeing this
		 * mem_cgroup(see mem_cgroup_put).
		 */
		mem_cgroup_get(parent);
4964
	} else {
4965 4966
		res_counter_init(&memcg->res, NULL);
		res_counter_init(&memcg->memsw, NULL);
4967
	}
4968 4969
	memcg->last_scanned_node = MAX_NUMNODES;
	INIT_LIST_HEAD(&memcg->oom_notify);
4970

	if (parent)
4972 4973 4974 4975
		memcg->swappiness = mem_cgroup_swappiness(parent);
	atomic_set(&memcg->refcnt, 1);
	memcg->move_charge_at_immigrate = 0;
	mutex_init(&memcg->thresholds_lock);
4976
	spin_lock_init(&memcg->move_lock);
4977 4978 4979 4980 4981 4982 4983 4984 4985 4986 4987

	error = memcg_init_kmem(memcg, &mem_cgroup_subsys);
	if (error) {
		/*
		 * We call put now because our (and parent's) refcnts
		 * are already in place. mem_cgroup_put() will internally
		 * call __mem_cgroup_free, so return directly
		 */
		mem_cgroup_put(memcg);
		return ERR_PTR(error);
	}
4988
	return &memcg->css;
4989
free_out:
4990
	__mem_cgroup_free(memcg);
	return ERR_PTR(error);
}

4994
static int mem_cgroup_pre_destroy(struct cgroup *cont)
4995
{
4996
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
4997

4998
	return mem_cgroup_force_empty(memcg, false);
4999 5000
}

5001
static void mem_cgroup_destroy(struct cgroup *cont)
{
5003
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
5004

5005
	kmem_cgroup_destroy(memcg);

5007
	mem_cgroup_put(memcg);
}

5010
#ifdef CONFIG_MMU
5011
/* Handlers for move charge at task migration. */
5012 5013
#define PRECHARGE_COUNT_AT_ONCE	256
static int mem_cgroup_do_precharge(unsigned long count)
5014
{
5015 5016
	int ret = 0;
	int batch_count = PRECHARGE_COUNT_AT_ONCE;
5017
	struct mem_cgroup *memcg = mc.to;
5018

5019
	if (mem_cgroup_is_root(memcg)) {
5020 5021 5022 5023 5024 5025 5026 5027
		mc.precharge += count;
		/* we don't need css_get for root */
		return ret;
	}
	/* try to charge at once */
	if (count > 1) {
		struct res_counter *dummy;
		/*
5028
		 * "memcg" cannot be under rmdir() because we've already checked
5029 5030 5031 5032
		 * by cgroup_lock_live_cgroup() that it is not removed and we
		 * are still under the same cgroup_mutex. So we can postpone
		 * css_get().
		 */
5033
		if (res_counter_charge(&memcg->res, PAGE_SIZE * count, &dummy))
5034
			goto one_by_one;
5035
		if (do_swap_account && res_counter_charge(&memcg->memsw,
5036
						PAGE_SIZE * count, &dummy)) {
5037
			res_counter_uncharge(&memcg->res, PAGE_SIZE * count);
5038 5039 5040 5041 5042 5043 5044 5045 5046 5047 5048 5049 5050 5051 5052 5053
			goto one_by_one;
		}
		mc.precharge += count;
		return ret;
	}
one_by_one:
	/* fall back to one by one charge */
	while (count--) {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		if (!batch_count--) {
			batch_count = PRECHARGE_COUNT_AT_ONCE;
			cond_resched();
		}
5054 5055
		ret = __mem_cgroup_try_charge(NULL,
					GFP_KERNEL, 1, &memcg, false);
5056
		if (ret)
5057
			/* mem_cgroup_clear_mc() will do uncharge later */
5058
			return ret;
5059 5060
		mc.precharge++;
	}
5061 5062 5063 5064
	return ret;
}

/**
 * get_mctgt_type - get target type of moving charge
 * @vma: the vma the pte to be checked belongs to
 * @addr: the address corresponding to the pte to be checked
 * @ptent: the pte to be checked
 * @target: the pointer where the target page or swap entry will be stored
 *          (can be NULL)
 *
 * Returns
 *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
 *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
 *     move charge. If @target is not NULL, the page is stored in target->page
 *     with an extra refcnt taken (callers should handle it).
 *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
 *     target for charge migration. If @target is not NULL, the entry is
 *     stored in target->ent.
 *
 * Called with pte lock held.
 */
union mc_target {
	struct page	*page;
5084
	swp_entry_t	ent;
5085 5086 5087
};

enum mc_target_type {
5088
	MC_TARGET_NONE = 0,
5089
	MC_TARGET_PAGE,
5090
	MC_TARGET_SWAP,
5091 5092
};

static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
						unsigned long addr, pte_t ptent)
5095
{
	struct page *page = vm_normal_page(vma, addr, ptent);
5097

	if (!page || !page_mapped(page))
		return NULL;
	if (PageAnon(page)) {
		/* we don't move shared anon */
5102
		if (!move_anon())
			return NULL;
5104 5105
	} else if (!move_file())
		/* we ignore mapcount for file pages */
		return NULL;
	if (!get_page_unless_zero(page))
		return NULL;

	return page;
}

5113
#ifdef CONFIG_SWAP
static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	struct page *page = NULL;
	swp_entry_t ent = pte_to_swp_entry(ptent);

	if (!move_anon() || non_swap_entry(ent))
		return NULL;
5122 5123 5124 5125 5126
	/*
	 * Because lookup_swap_cache() updates some statistics counter,
	 * we call find_get_page() with swapper_space directly.
	 */
	page = find_get_page(&swapper_space, ent.val);
	if (do_swap_account)
		entry->val = ent.val;

	return page;
}
5132 5133 5134 5135 5136 5137 5138
#else
static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	return NULL;
}
#endif

5140 5141 5142 5143 5144 5145 5146 5147 5148 5149 5150 5151 5152 5153 5154 5155 5156 5157 5158
static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	struct page *page = NULL;
	struct address_space *mapping;
	pgoff_t pgoff;

	if (!vma->vm_file) /* anonymous vma */
		return NULL;
	if (!move_file())
		return NULL;

	mapping = vma->vm_file->f_mapping;
	if (pte_none(ptent))
		pgoff = linear_page_index(vma, addr);
	else /* pte_file(ptent) is true */
		pgoff = pte_to_pgoff(ptent);

	/* page is moved even if it's not RSS of this task(page-faulted). */
5159 5160 5161 5162 5163 5164
	page = find_get_page(mapping, pgoff);

#ifdef CONFIG_SWAP
	/* shmem/tmpfs may report page out on swap: account for that too. */
	if (radix_tree_exceptional_entry(page)) {
		swp_entry_t swap = radix_to_swp_entry(page);
5165
		if (do_swap_account)
5166 5167
			*entry = swap;
		page = find_get_page(&swapper_space, swap.val);
5168
	}
5169
#endif
5170 5171 5172
	return page;
}

5173
static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
		unsigned long addr, pte_t ptent, union mc_target *target)
{
	struct page *page = NULL;
	struct page_cgroup *pc;
5178
	enum mc_target_type ret = MC_TARGET_NONE;
D

	if (pte_present(ptent))
		page = mc_handle_present_pte(vma, addr, ptent);
	else if (is_swap_pte(ptent))
		page = mc_handle_swap_pte(vma, addr, ptent, &ent);
5185 5186
	else if (pte_none(ptent) || pte_file(ptent))
		page = mc_handle_file_pte(vma, addr, ptent, &ent);

	if (!page && !ent.val)
5189
		return ret;
5190 5191 5192 5193 5194 5195 5196 5197 5198 5199 5200 5201 5202 5203 5204
	if (page) {
		pc = lookup_page_cgroup(page);
		/*
		 * Do only loose check w/o page_cgroup lock.
		 * mem_cgroup_move_account() checks the pc is valid or not under
		 * the lock.
		 */
		if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
			ret = MC_TARGET_PAGE;
			if (target)
				target->page = page;
		}
		if (!ret || !target)
			put_page(page);
	}
	/* There is a swap entry and a page doesn't exist or isn't charged */
	if (ent.val && !ret &&
5207
			css_id(&mc.from->css) == lookup_swap_cgroup_id(ent)) {
5208 5209 5210
		ret = MC_TARGET_SWAP;
		if (target)
			target->ent = ent;
5211 5212 5213 5214
	}
	return ret;
}

5215 5216 5217 5218 5219 5220 5221 5222 5223 5224 5225 5226 5227 5228 5229 5230 5231 5232 5233 5234 5235 5236 5237 5238 5239 5240 5241 5242 5243 5244 5245 5246 5247 5248 5249
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * We don't consider swapping or file mapped pages because THP does not
 * support them for now.
 * Caller should make sure that pmd_trans_huge(pmd) is true.
 */
static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
		unsigned long addr, pmd_t pmd, union mc_target *target)
{
	struct page *page = NULL;
	struct page_cgroup *pc;
	enum mc_target_type ret = MC_TARGET_NONE;

	page = pmd_page(pmd);
	VM_BUG_ON(!page || !PageHead(page));
	if (!move_anon())
		return ret;
	pc = lookup_page_cgroup(page);
	if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
		ret = MC_TARGET_PAGE;
		if (target) {
			get_page(page);
			target->page = page;
		}
	}
	return ret;
}
#else
static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
		unsigned long addr, pmd_t pmd, union mc_target *target)
{
	return MC_TARGET_NONE;
}
#endif

5250 5251 5252 5253 5254 5255 5256 5257
static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
					unsigned long addr, unsigned long end,
					struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	pte_t *pte;
	spinlock_t *ptl;

5258 5259 5260 5261
	if (pmd_trans_huge_lock(pmd, vma) == 1) {
		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
			mc.precharge += HPAGE_PMD_NR;
		spin_unlock(&vma->vm_mm->page_table_lock);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		if (get_mctgt_type(vma, addr, *pte, NULL))
			mc.precharge++;	/* increment precharge temporarily */
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	return 0;
}

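/*
 * Walk every VMA of @mm (hugetlb VMAs are skipped) and return the total
 * number of pages whose charges would be moved.
 */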
static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
{
	unsigned long precharge;
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		struct mm_walk mem_cgroup_count_precharge_walk = {
			.pmd_entry = mem_cgroup_count_precharge_pte_range,
			.mm = mm,
			.private = vma,
		};
		if (is_vm_hugetlb_page(vma))
			continue;
		walk_page_range(vma->vm_start, vma->vm_end,
					&mem_cgroup_count_precharge_walk);
	}
	up_read(&mm->mmap_sem);

	precharge = mc.precharge;
	mc.precharge = 0;

	return precharge;
}

static int mem_cgroup_precharge_mc(struct mm_struct *mm)
{
	unsigned long precharge = mem_cgroup_count_precharge(mm);

	VM_BUG_ON(mc.moving_task);
	mc.moving_task = current;
	return mem_cgroup_do_precharge(precharge);
}

/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
static void __mem_cgroup_clear_mc(void)
{
	struct mem_cgroup *from = mc.from;
	struct mem_cgroup *to = mc.to;

	/* we must uncharge all the leftover precharges from mc.to */
	if (mc.precharge) {
		__mem_cgroup_cancel_charge(mc.to, mc.precharge);
		mc.precharge = 0;
	}
	/*
	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
	 * we must uncharge here.
	 */
	if (mc.moved_charge) {
		__mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
		mc.moved_charge = 0;
	}
	/* we must fixup refcnts and charges */
	if (mc.moved_swap) {
		/* uncharge swap account from the old cgroup */
		if (!mem_cgroup_is_root(mc.from))
			res_counter_uncharge(&mc.from->memsw,
						PAGE_SIZE * mc.moved_swap);
		__mem_cgroup_put(mc.from, mc.moved_swap);

		if (!mem_cgroup_is_root(mc.to)) {
			/*
			 * we charged both to->res and to->memsw, so we should
			 * uncharge to->res.
			 */
			res_counter_uncharge(&mc.to->res,
						PAGE_SIZE * mc.moved_swap);
		}
		/* we've already done mem_cgroup_get(mc.to) */
		mc.moved_swap = 0;
	}
	memcg_oom_recover(from);
	memcg_oom_recover(to);
	wake_up_all(&mc.waitq);
}

static void mem_cgroup_clear_mc(void)
{
	struct mem_cgroup *from = mc.from;

	/*
	 * we must clear moving_task before waking up waiters at the end of
	 * task migration.
	 */
	mc.moving_task = NULL;
	__mem_cgroup_clear_mc();
	spin_lock(&mc.lock);
	mc.from = NULL;
	mc.to = NULL;
	spin_unlock(&mc.lock);
	mem_cgroup_end_move(from);
}

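/*
 * cgroup ->can_attach() callback: if charge moving is enabled on the
 * destination memcg, record the source and destination in "mc" and
 * precharge the pages that will be moved.  A non-zero return value makes
 * the attach fail.
 */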
static int mem_cgroup_can_attach(struct cgroup *cgroup,
				 struct cgroup_taskset *tset)
{
	struct task_struct *p = cgroup_taskset_first(tset);
	int ret = 0;
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgroup);

	if (memcg->move_charge_at_immigrate) {
		struct mm_struct *mm;
		struct mem_cgroup *from = mem_cgroup_from_task(p);

		VM_BUG_ON(from == memcg);

		mm = get_task_mm(p);
		if (!mm)
			return 0;
		/* We move charges only when we move the owner of the mm */
		if (mm->owner == p) {
			VM_BUG_ON(mc.from);
			VM_BUG_ON(mc.to);
			VM_BUG_ON(mc.precharge);
			VM_BUG_ON(mc.moved_charge);
			VM_BUG_ON(mc.moved_swap);
			mem_cgroup_start_move(from);
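			/*
			 * Publish the move under mc.lock so that concurrent
			 * readers of mc.from/mc.to see a consistent pair.
			 */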
			spin_lock(&mc.lock);
			mc.from = from;
			mc.to = memcg;
			spin_unlock(&mc.lock);
			/* We set mc.moving_task later */

			ret = mem_cgroup_precharge_mc(mm);
			if (ret)
				mem_cgroup_clear_mc();
		}
		mmput(mm);
	}
	return ret;
}

static void mem_cgroup_cancel_attach(struct cgroup *cgroup,
				     struct cgroup_taskset *tset)
{
	mem_cgroup_clear_mc();
}

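/*
 * Move the charges found in this pte range (or in the transparent huge pmd
 * covering it) from mc.from to mc.to, consuming the precharges accumulated
 * in mc.precharge.
 */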
static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mm_walk *walk)
{
	int ret = 0;
	struct vm_area_struct *vma = walk->private;
	pte_t *pte;
	spinlock_t *ptl;
	enum mc_target_type target_type;
	union mc_target target;
	struct page *page;
	struct page_cgroup *pc;

	/*
	 * We do not take compound_lock() here, yet no race with THP splitting
	 * can happen, because:
	 *  - if pmd_trans_huge_lock() returns 1, the relevant thp is not
	 *    under splitting, which means there is no concurrent thp split;
	 *  - if another thread runs into split_huge_page() just after we
	 *    entered this if-block, it must wait for the page table lock to
	 *    be released in __split_huge_page_splitting(), before which the
	 *    main part of the thp split has not yet been executed.
	 */
	if (pmd_trans_huge_lock(pmd, vma) == 1) {
		if (mc.precharge < HPAGE_PMD_NR) {
			spin_unlock(&vma->vm_mm->page_table_lock);
			return 0;
		}
		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
		if (target_type == MC_TARGET_PAGE) {
			page = target.page;
			if (!isolate_lru_page(page)) {
				pc = lookup_page_cgroup(page);
				if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
							pc, mc.from, mc.to)) {
					mc.precharge -= HPAGE_PMD_NR;
					mc.moved_charge += HPAGE_PMD_NR;
				}
				putback_lru_page(page);
			}
			put_page(page);
		}
		spin_unlock(&vma->vm_mm->page_table_lock);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
retry:
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; addr += PAGE_SIZE) {
		pte_t ptent = *(pte++);
		swp_entry_t ent;

		if (!mc.precharge)
			break;

		switch (get_mctgt_type(vma, addr, ptent, &target)) {
		case MC_TARGET_PAGE:
			page = target.page;
			if (isolate_lru_page(page))
				goto put;
			pc = lookup_page_cgroup(page);
			if (!mem_cgroup_move_account(page, 1, pc,
						     mc.from, mc.to)) {
				mc.precharge--;
				/* we uncharge from mc.from later. */
				mc.moved_charge++;
			}
			putback_lru_page(page);
put:			/* get_mctgt_type() took a reference on the page */
			put_page(page);
			break;
		case MC_TARGET_SWAP:
			ent = target.ent;
			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
				mc.precharge--;
				/* we fixup refcnts and charges later. */
				mc.moved_swap++;
			}
			break;
		default:
			break;
		}
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	if (addr != end) {
		/*
		 * We have consumed all the precharges we obtained in
		 * can_attach().  Try to charge one page at a time from here
		 * on, but do not make any further charges against mc.to once
		 * a charge has already failed during the attach phase.
		 */
		ret = mem_cgroup_do_precharge(1);
		if (!ret)
			goto retry;
	}

	return ret;
}

static void mem_cgroup_move_charge(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	lru_add_drain_all();
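	/*
	 * The drain above moves pages from the per-cpu LRU pagevecs onto the
	 * LRU lists so that isolate_lru_page() below can isolate them.
	 */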
retry:
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		/*
		 * Someone holding the mmap_sem may be waiting on our waitq,
		 * so cancel all extra charges, wake up all waiters, and retry.
		 * Because we cancel the precharges, we might not be able to
		 * move enough charges, but moving charge is a best-effort
		 * feature anyway, so this is not a big problem.
		 */
		__mem_cgroup_clear_mc();
		cond_resched();
		goto retry;
	}
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		int ret;
		struct mm_walk mem_cgroup_move_charge_walk = {
			.pmd_entry = mem_cgroup_move_charge_pte_range,
			.mm = mm,
			.private = vma,
		};
		if (is_vm_hugetlb_page(vma))
			continue;
		ret = walk_page_range(vma->vm_start, vma->vm_end,
						&mem_cgroup_move_charge_walk);
		if (ret)
			/*
			 * A non-zero return means we have consumed all the
			 * precharges and failed to make an additional charge.
			 * Just give up here.
			 */
			break;
	}
	up_read(&mm->mmap_sem);
}

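/*
 * cgroup ->attach() callback: the migration has been committed, so perform
 * the actual charge moving prepared in mem_cgroup_can_attach() and clear
 * the move-charge state.
 */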
static void mem_cgroup_move_task(struct cgroup *cont,
				 struct cgroup_taskset *tset)
{
	struct task_struct *p = cgroup_taskset_first(tset);
	struct mm_struct *mm = get_task_mm(p);

	if (mm) {
		if (mc.to)
			mem_cgroup_move_charge(mm);
		mmput(mm);
	}
	if (mc.to)
		mem_cgroup_clear_mc();
}
#else	/* !CONFIG_MMU */
static int mem_cgroup_can_attach(struct cgroup *cgroup,
				 struct cgroup_taskset *tset)
{
	return 0;
}
static void mem_cgroup_cancel_attach(struct cgroup *cgroup,
				     struct cgroup_taskset *tset)
{
}
static void mem_cgroup_move_task(struct cgroup *cont,
				 struct cgroup_taskset *tset)
{
}
#endif

struct cgroup_subsys mem_cgroup_subsys = {
	.name = "memory",
	.subsys_id = mem_cgroup_subsys_id,
	.create = mem_cgroup_create,
	.pre_destroy = mem_cgroup_pre_destroy,
	.destroy = mem_cgroup_destroy,
	.can_attach = mem_cgroup_can_attach,
	.cancel_attach = mem_cgroup_cancel_attach,
	.attach = mem_cgroup_move_task,
	.base_cftypes = mem_cgroup_files,
	.early_init = 0,
	.use_id = 1,
	.__DEPRECATED_clear_css_refs = true,
};

#ifdef CONFIG_MEMCG_SWAP
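/*
 * Parse the "swapaccount=" boot parameter: "swapaccount=0" disables swap
 * accounting, "swapaccount=1" enables it.
 */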
static int __init enable_swap_account(char *s)
{
	/* consider enabled if no parameter or 1 is given */
	if (!strcmp(s, "1"))
		really_do_swap_account = 1;
	else if (!strcmp(s, "0"))
		really_do_swap_account = 0;
	return 1;
}
__setup("swapaccount=", enable_swap_account);

#endif