memcontrol.c 146.6 KB
Newer Older
B
Balbir Singh 已提交
1 2 3 4 5
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
6 7 8
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
9 10 11 12
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
B
Balbir Singh 已提交
13 14 15 16 17 18 19 20 21 22 23 24 25 26
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
27
#include <linux/mm.h>
28
#include <linux/hugetlb.h>
K
KAMEZAWA Hiroyuki 已提交
29
#include <linux/pagemap.h>
30
#include <linux/smp.h>
31
#include <linux/page-flags.h>
32
#include <linux/backing-dev.h>
33 34
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
35
#include <linux/limits.h>
36
#include <linux/export.h>
37
#include <linux/mutex.h>
38
#include <linux/rbtree.h>
39
#include <linux/slab.h>
40
#include <linux/swap.h>
41
#include <linux/swapops.h>
42
#include <linux/spinlock.h>
43 44
#include <linux/eventfd.h>
#include <linux/sort.h>
45
#include <linux/fs.h>
46
#include <linux/seq_file.h>
47
#include <linux/vmalloc.h>
48
#include <linux/mm_inline.h>
49
#include <linux/page_cgroup.h>
50
#include <linux/cpu.h>
51
#include <linux/oom.h>
K
KAMEZAWA Hiroyuki 已提交
52
#include "internal.h"
G
Glauber Costa 已提交
53 54
#include <net/sock.h>
#include <net/tcp_memcontrol.h>
B
Balbir Singh 已提交
55

56 57
#include <asm/uaccess.h>

58 59
#include <trace/events/vmscan.h>

60 61
struct cgroup_subsys mem_cgroup_subsys __read_mostly;
#define MEM_CGROUP_RECLAIM_RETRIES	5
62
struct mem_cgroup *root_mem_cgroup __read_mostly;
B
Balbir Singh 已提交
63

64
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
L
Li Zefan 已提交
65
/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
66
int do_swap_account __read_mostly;
67 68 69 70 71 72 73 74

/* for remember boot option*/
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED
static int really_do_swap_account __initdata = 1;
#else
static int really_do_swap_account __initdata = 0;
#endif

75 76 77 78 79
#else
#define do_swap_account		(0)
#endif


80 81 82 83 84 85 86 87
/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE, 	   /* # of pages charged as cache */
88
	MEM_CGROUP_STAT_RSS,	   /* # of pages charged as anon rss */
89
	MEM_CGROUP_STAT_FILE_MAPPED,  /* # of pages charged as file rss */
90
	MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
91
	MEM_CGROUP_STAT_DATA, /* end of data requires synchronization */
92 93 94
	MEM_CGROUP_STAT_NSTATS,
};

95 96 97 98
enum mem_cgroup_events_index {
	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
	MEM_CGROUP_EVENTS_COUNT,	/* # of pages paged in/out */
99 100
	MEM_CGROUP_EVENTS_PGFAULT,	/* # of page-faults */
	MEM_CGROUP_EVENTS_PGMAJFAULT,	/* # of major page-faults */
101 102
	MEM_CGROUP_EVENTS_NSTATS,
};
103 104 105 106 107 108 109 110 111
/*
 * Per memcg event counter is incremented at every pagein/pageout. With THP,
 * it will be incremated by the number of pages. This counter is used for
 * for trigger some periodic events. This is straightforward and better
 * than using jiffies etc. to handle periodic memcg event.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
112
	MEM_CGROUP_TARGET_NUMAINFO,
113 114 115 116
	MEM_CGROUP_NTARGETS,
};
#define THRESHOLDS_EVENTS_TARGET (128)
#define SOFTLIMIT_EVENTS_TARGET (1024)
117
#define NUMAINFO_EVENTS_TARGET	(1024)
118

119
struct mem_cgroup_stat_cpu {
120
	long count[MEM_CGROUP_STAT_NSTATS];
121
	unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
122
	unsigned long targets[MEM_CGROUP_NTARGETS];
123 124
};

125 126 127 128 129 130 131
struct mem_cgroup_reclaim_iter {
	/* css_id of the last scanned hierarchy member */
	int position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

132 133 134 135
/*
 * per-zone information in memory controller.
 */
struct mem_cgroup_per_zone {
136
	struct lruvec		lruvec;
137
	unsigned long		lru_size[NR_LRU_LISTS];
K
KOSAKI Motohiro 已提交
138

139 140
	struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];

K
KOSAKI Motohiro 已提交
141
	struct zone_reclaim_stat reclaim_stat;
142 143 144 145
	struct rb_node		tree_node;	/* RB tree node */
	unsigned long long	usage_in_excess;/* Set to the value by which */
						/* the soft limit is exceeded*/
	bool			on_tree;
146
	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
147
						/* use container_of	   */
148 149 150 151 152 153 154 155 156 157
};

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

struct mem_cgroup_lru_info {
	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
};

158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177
/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_zone {
	struct rb_root rb_root;
	spinlock_t lock;
};

struct mem_cgroup_tree_per_node {
	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

178 179 180 181 182
struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	u64 threshold;
};

K
KAMEZAWA Hiroyuki 已提交
183
/* For threshold */
184 185
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below usage. */
186
	int current_threshold;
187 188 189 190 191
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[0];
};
192 193 194 195 196 197 198 199 200 201 202 203

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

K
KAMEZAWA Hiroyuki 已提交
204 205 206 207 208
/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};
209

210 211
static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
212

B
Balbir Singh 已提交
213 214 215 216 217 218 219
/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
220 221 222
 * we hit the water mark. May be even add a low water mark, such that
 * no reclaim occurs from a cgroup at it's low water mark, this is
 * a feature that will be implemented much later in the future.
B
Balbir Singh 已提交
223 224 225 226 227 228 229
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;
230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253

	union {
		/*
		 * the counter to account for mem+swap usage.
		 */
		struct res_counter memsw;

		/*
		 * rcu_freeing is used only when freeing struct mem_cgroup,
		 * so put it into a union to avoid wasting more memory.
		 * It must be disjoint from the css field.  It could be
		 * in a union with the res field, but res plays a much
		 * larger part in mem_cgroup life than memsw, and might
		 * be of interest, even at time of free, when debugging.
		 * So share rcu_head with the less interesting memsw.
		 */
		struct rcu_head rcu_freeing;
		/*
		 * But when using vfree(), that cannot be done at
		 * interrupt time, so we must then queue the work.
		 */
		struct work_struct work_freeing;
	};

254 255 256 257
	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
	 */
258
	struct mem_cgroup_lru_info info;
259 260 261
	int last_scanned_node;
#if MAX_NUMNODES > 1
	nodemask_t	scan_nodes;
262 263
	atomic_t	numainfo_events;
	atomic_t	numainfo_updating;
264
#endif
265 266 267 268
	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;
269 270 271 272

	bool		oom_lock;
	atomic_t	under_oom;

273
	atomic_t	refcnt;
274

275
	int	swappiness;
276 277
	/* OOM-Killer disable */
	int		oom_kill_disable;
K
KOSAKI Motohiro 已提交
278

279 280 281
	/* set when res.limit == memsw.limit */
	bool		memsw_is_minimum;

282 283 284 285
	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
286
	struct mem_cgroup_thresholds thresholds;
287

288
	/* thresholds for mem+swap usage. RCU-protected */
289
	struct mem_cgroup_thresholds memsw_thresholds;
290

K
KAMEZAWA Hiroyuki 已提交
291 292
	/* For oom notifier event fd */
	struct list_head oom_notify;
293

294 295 296 297 298
	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup ? And what type of charges should we move ?
	 */
	unsigned long 	move_charge_at_immigrate;
299 300 301 302
	/*
	 * set > 0 if pages under this cgroup are moving to other cgroup.
	 */
	atomic_t	moving_account;
303 304
	/* taken only while moving_account > 0 */
	spinlock_t	move_lock;
305
	/*
306
	 * percpu counter.
307
	 */
308
	struct mem_cgroup_stat_cpu *stat;
309 310 311 312 313 314
	/*
	 * used when a cpu is offlined or other synchronizations
	 * See mem_cgroup_read_stat().
	 */
	struct mem_cgroup_stat_cpu nocpu_base;
	spinlock_t pcp_counter_lock;
G
Glauber Costa 已提交
315 316 317 318

#ifdef CONFIG_INET
	struct tcp_memcontrol tcp_mem;
#endif
B
Balbir Singh 已提交
319 320
};

321 322 323 324 325 326
/* Stuffs for move charges at task migration. */
/*
 * Types of charges to be moved. "move_charge_at_immitgrate" is treated as a
 * left-shifted bitmap of these types.
 */
enum move_type {
327
	MOVE_CHARGE_TYPE_ANON,	/* private anonymous page and swap of it */
328
	MOVE_CHARGE_TYPE_FILE,	/* file page(including tmpfs) and swap of it */
329 330 331
	NR_MOVE_TYPE,
};

332 333
/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
334
	spinlock_t	  lock; /* for from, to */
335 336 337
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long precharge;
338
	unsigned long moved_charge;
339
	unsigned long moved_swap;
340 341 342
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
343
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
344 345
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};
346

D
Daisuke Nishimura 已提交
347 348 349 350 351 352
static bool move_anon(void)
{
	return test_bit(MOVE_CHARGE_TYPE_ANON,
					&mc.to->move_charge_at_immigrate);
}

353 354 355 356 357 358
static bool move_file(void)
{
	return test_bit(MOVE_CHARGE_TYPE_FILE,
					&mc.to->move_charge_at_immigrate);
}

359 360 361 362 363 364 365
/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		(100)
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	(2)

366 367 368
enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_MAPPED,
369
	MEM_CGROUP_CHARGE_TYPE_SHMEM,	/* used by page migration of shmem */
370
	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
K
KAMEZAWA Hiroyuki 已提交
371
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
K
KAMEZAWA Hiroyuki 已提交
372
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
373 374 375
	NR_CHARGE_TYPE,
};

376
/* for encoding cft->private value on file */
377 378 379
#define _MEM			(0)
#define _MEMSWAP		(1)
#define _OOM_TYPE		(2)
380 381 382
#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_TYPE(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
K
KAMEZAWA Hiroyuki 已提交
383 384
/* Used for OOM nofiier */
#define OOM_CONTROL		(0)
385

386 387 388 389 390 391 392 393
/*
 * Reclaim flags for mem_cgroup_hierarchical_reclaim
 */
#define MEM_CGROUP_RECLAIM_NOSWAP_BIT	0x0
#define MEM_CGROUP_RECLAIM_NOSWAP	(1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
#define MEM_CGROUP_RECLAIM_SHRINK_BIT	0x1
#define MEM_CGROUP_RECLAIM_SHRINK	(1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)

394 395
static void mem_cgroup_get(struct mem_cgroup *memcg);
static void mem_cgroup_put(struct mem_cgroup *memcg);
G
Glauber Costa 已提交
396 397 398 399

/* Writing them here to avoid exposing memcg's inner layout */
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
#include <net/sock.h>
G
Glauber Costa 已提交
400
#include <net/ip.h>
G
Glauber Costa 已提交
401 402 403 404

static bool mem_cgroup_is_root(struct mem_cgroup *memcg);
void sock_update_memcg(struct sock *sk)
{
405
	if (mem_cgroup_sockets_enabled) {
G
Glauber Costa 已提交
406 407 408 409
		struct mem_cgroup *memcg;

		BUG_ON(!sk->sk_prot->proto_cgroup);

410 411 412 413 414 415 416 417 418 419 420 421 422 423
		/* Socket cloning can throw us here with sk_cgrp already
		 * filled. It won't however, necessarily happen from
		 * process context. So the test for root memcg given
		 * the current task's memcg won't help us in this case.
		 *
		 * Respecting the original socket's memcg is a better
		 * decision in this case.
		 */
		if (sk->sk_cgrp) {
			BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
			mem_cgroup_get(sk->sk_cgrp->memcg);
			return;
		}

G
Glauber Costa 已提交
424 425 426 427 428 429 430 431 432 433 434 435 436
		rcu_read_lock();
		memcg = mem_cgroup_from_task(current);
		if (!mem_cgroup_is_root(memcg)) {
			mem_cgroup_get(memcg);
			sk->sk_cgrp = sk->sk_prot->proto_cgroup(memcg);
		}
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(sock_update_memcg);

void sock_release_memcg(struct sock *sk)
{
437
	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
G
Glauber Costa 已提交
438 439 440 441 442 443
		struct mem_cgroup *memcg;
		WARN_ON(!sk->sk_cgrp->memcg);
		memcg = sk->sk_cgrp->memcg;
		mem_cgroup_put(memcg);
	}
}
G
Glauber Costa 已提交
444

445
#ifdef CONFIG_INET
G
Glauber Costa 已提交
446 447 448 449 450 451 452 453
struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg || mem_cgroup_is_root(memcg))
		return NULL;

	return &memcg->tcp_mem.cg_proto;
}
EXPORT_SYMBOL(tcp_proto_cgroup);
G
Glauber Costa 已提交
454 455 456
#endif /* CONFIG_INET */
#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */

457
static void drain_all_stock_async(struct mem_cgroup *memcg);
458

459
static struct mem_cgroup_per_zone *
460
mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid)
461
{
462
	return &memcg->info.nodeinfo[nid]->zoneinfo[zid];
463 464
}

465
struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
466
{
467
	return &memcg->css;
468 469
}

470
static struct mem_cgroup_per_zone *
471
page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page)
472
{
473 474
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);
475

476
	return mem_cgroup_zoneinfo(memcg, nid, zid);
477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_node_zone(int nid, int zid)
{
	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static void
495
__mem_cgroup_insert_exceeded(struct mem_cgroup *memcg,
496
				struct mem_cgroup_per_zone *mz,
497 498
				struct mem_cgroup_tree_per_zone *mctz,
				unsigned long long new_usage_in_excess)
499 500 501 502 503 504 505 506
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_zone *mz_node;

	if (mz->on_tree)
		return;

507 508 509
	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess)
			p = &(*p)->rb_left;
		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}
	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
526 527 528
}

static void
529
__mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
530 531 532 533 534 535 536 537 538
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz)
{
	if (!mz->on_tree)
		return;
	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

539
static void
540
mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
541 542 543 544
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz)
{
	spin_lock(&mctz->lock);
545
	__mem_cgroup_remove_exceeded(memcg, mz, mctz);
546 547 548 549
	spin_unlock(&mctz->lock);
}


550
static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
551
{
552
	unsigned long long excess;
553 554
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;
555 556
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);
557 558 559
	mctz = soft_limit_tree_from_page(page);

	/*
560 561
	 * Necessary to update all ancestors when hierarchy is used.
	 * because their event counter is not touched.
562
	 */
563 564 565
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_zoneinfo(memcg, nid, zid);
		excess = res_counter_soft_limit_excess(&memcg->res);
566 567 568 569
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
570
		if (excess || mz->on_tree) {
571 572 573
			spin_lock(&mctz->lock);
			/* if on-tree, remove it */
			if (mz->on_tree)
574
				__mem_cgroup_remove_exceeded(memcg, mz, mctz);
575
			/*
576 577
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
578
			 */
579
			__mem_cgroup_insert_exceeded(memcg, mz, mctz, excess);
580 581
			spin_unlock(&mctz->lock);
		}
582 583 584
	}
}

585
static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
586 587 588 589 590
{
	int node, zone;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;

B
Bob Liu 已提交
591
	for_each_node(node) {
592
		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
593
			mz = mem_cgroup_zoneinfo(memcg, node, zone);
594
			mctz = soft_limit_tree_node_zone(node, zone);
595
			mem_cgroup_remove_exceeded(memcg, mz, mctz);
596 597 598 599
		}
	}
}

600 601 602 603
static struct mem_cgroup_per_zone *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct rb_node *rightmost = NULL;
604
	struct mem_cgroup_per_zone *mz;
605 606

retry:
607
	mz = NULL;
608 609 610 611 612 613 614 615 616 617
	rightmost = rb_last(&mctz->rb_root);
	if (!rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
	/*
	 * Remove the node now but someone else can add it back,
	 * we will to add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
618 619 620
	__mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
	if (!res_counter_soft_limit_excess(&mz->memcg->res) ||
		!css_tryget(&mz->memcg->css))
621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_zone *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct mem_cgroup_per_zone *mz;

	spin_lock(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock(&mctz->lock);
	return mz;
}

637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655
/*
 * Implementation Note: reading percpu statistics for memcg.
 *
 * Both of vmstat[] and percpu_counter has threshold and do periodic
 * synchronization to implement "quick" read. There are trade-off between
 * reading cost and precision of value. Then, we may have a chance to implement
 * a periodic synchronizion of counter in memcg's counter.
 *
 * But this _read() function is used for user interface now. The user accounts
 * memory usage by memory cgroup and he _always_ requires exact value because
 * he accounts memory. Even if we provide quick-and-fuzzy read, we always
 * have to visit all online cpus and make sum. So, for now, unnecessary
 * synchronization is not implemented. (just implemented for cpu hotplug)
 *
 * If there are kernel internal actions which can make use of some not-exact
 * value, and reading all cpu value can be performance bottleneck in some
 * common workload, threashold and synchonization as vmstat[] should be
 * implemented.
 */
656
static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
657
				 enum mem_cgroup_stat_index idx)
658
{
659
	long val = 0;
660 661
	int cpu;

662 663
	get_online_cpus();
	for_each_online_cpu(cpu)
664
		val += per_cpu(memcg->stat->count[idx], cpu);
665
#ifdef CONFIG_HOTPLUG_CPU
666 667 668
	spin_lock(&memcg->pcp_counter_lock);
	val += memcg->nocpu_base.count[idx];
	spin_unlock(&memcg->pcp_counter_lock);
669 670
#endif
	put_online_cpus();
671 672 673
	return val;
}

674
static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
675 676 677
					 bool charge)
{
	int val = (charge) ? 1 : -1;
678
	this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
679 680
}

681
static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
682 683 684 685 686 687
					    enum mem_cgroup_events_index idx)
{
	unsigned long val = 0;
	int cpu;

	for_each_online_cpu(cpu)
688
		val += per_cpu(memcg->stat->events[idx], cpu);
689
#ifdef CONFIG_HOTPLUG_CPU
690 691 692
	spin_lock(&memcg->pcp_counter_lock);
	val += memcg->nocpu_base.events[idx];
	spin_unlock(&memcg->pcp_counter_lock);
693 694 695 696
#endif
	return val;
}

697
static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
698
					 bool anon, int nr_pages)
699
{
700 701
	preempt_disable();

702 703 704 705 706 707
	/*
	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
	 * counted as CACHE even if it's on ANON LRU.
	 */
	if (anon)
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
708
				nr_pages);
709
	else
710
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
711
				nr_pages);
712

713 714
	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
715
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
716
	else {
717
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
718 719
		nr_pages = -nr_pages; /* for event */
	}
720

721
	__this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages);
722

723
	preempt_enable();
724 725
}

726
unsigned long
727
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
728
			unsigned int lru_mask)
729 730
{
	struct mem_cgroup_per_zone *mz;
H
Hugh Dickins 已提交
731
	enum lru_list lru;
732 733
	unsigned long ret = 0;

734
	mz = mem_cgroup_zoneinfo(memcg, nid, zid);
735

H
Hugh Dickins 已提交
736 737 738
	for_each_lru(lru) {
		if (BIT(lru) & lru_mask)
			ret += mz->lru_size[lru];
739 740 741 742 743
	}
	return ret;
}

static unsigned long
744
mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
745 746
			int nid, unsigned int lru_mask)
{
747 748 749
	u64 total = 0;
	int zid;

750
	for (zid = 0; zid < MAX_NR_ZONES; zid++)
751 752
		total += mem_cgroup_zone_nr_lru_pages(memcg,
						nid, zid, lru_mask);
753

754 755
	return total;
}
756

757
static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
758
			unsigned int lru_mask)
759
{
760
	int nid;
761 762
	u64 total = 0;

763
	for_each_node_state(nid, N_HIGH_MEMORY)
764
		total += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
765
	return total;
766 767
}

768 769
static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
770 771 772
{
	unsigned long val, next;

773 774
	val = __this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
	next = __this_cpu_read(memcg->stat->targets[target]);
775
	/* from time_after() in jiffies.h */
776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791
	if ((long)next - (long)val < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_NUMAINFO:
			next = val + NUMAINFO_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->stat->targets[target], next);
		return true;
792
	}
793
	return false;
794 795 796 797 798 799
}

/*
 * Check events in order.
 *
 */
800
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
801
{
802
	preempt_disable();
803
	/* threshold event is triggered in finer grain than soft limit */
804 805
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
806 807
		bool do_softlimit;
		bool do_numainfo __maybe_unused;
808 809 810 811 812 813 814 815 816

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
#if MAX_NUMNODES > 1
		do_numainfo = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_NUMAINFO);
#endif
		preempt_enable();

817
		mem_cgroup_threshold(memcg);
818
		if (unlikely(do_softlimit))
819
			mem_cgroup_update_tree(memcg, page);
820
#if MAX_NUMNODES > 1
821
		if (unlikely(do_numainfo))
822
			atomic_inc(&memcg->numainfo_events);
823
#endif
824 825
	} else
		preempt_enable();
826 827
}

G
Glauber Costa 已提交
828
struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
B
Balbir Singh 已提交
829 830 831 832 833 834
{
	return container_of(cgroup_subsys_state(cont,
				mem_cgroup_subsys_id), struct mem_cgroup,
				css);
}

835
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
836
{
837 838 839 840 841 842 843 844
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

845 846 847 848
	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
				struct mem_cgroup, css);
}

849
struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
850
{
851
	struct mem_cgroup *memcg = NULL;
852 853 854

	if (!mm)
		return NULL;
855 856 857 858 859 860 861
	/*
	 * Because we have no locks, mm->owner's may be being moved to other
	 * cgroup. We use css_tryget() here even if this looks
	 * pessimistic (rather than adding locks here).
	 */
	rcu_read_lock();
	do {
862 863
		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
		if (unlikely(!memcg))
864
			break;
865
	} while (!css_tryget(&memcg->css));
866
	rcu_read_unlock();
867
	return memcg;
868 869
}

870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889
/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a zone and a priority level in @reclaim to
 * divide up the memcgs in the hierarchy among all concurrent
 * reclaimers operating on the same zone and priority.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
K
KAMEZAWA Hiroyuki 已提交
890
{
891 892
	struct mem_cgroup *memcg = NULL;
	int id = 0;
893

894 895 896
	if (mem_cgroup_disabled())
		return NULL;

897 898
	if (!root)
		root = root_mem_cgroup;
K
KAMEZAWA Hiroyuki 已提交
899

900 901
	if (prev && !reclaim)
		id = css_id(&prev->css);
K
KAMEZAWA Hiroyuki 已提交
902

903 904
	if (prev && prev != root)
		css_put(&prev->css);
K
KAMEZAWA Hiroyuki 已提交
905

906 907 908 909 910
	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
			return NULL;
		return root;
	}
K
KAMEZAWA Hiroyuki 已提交
911

912
	while (!memcg) {
913
		struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
914
		struct cgroup_subsys_state *css;
915

916 917 918 919 920 921 922 923 924 925 926
		if (reclaim) {
			int nid = zone_to_nid(reclaim->zone);
			int zid = zone_idx(reclaim->zone);
			struct mem_cgroup_per_zone *mz;

			mz = mem_cgroup_zoneinfo(root, nid, zid);
			iter = &mz->reclaim_iter[reclaim->priority];
			if (prev && reclaim->generation != iter->generation)
				return NULL;
			id = iter->position;
		}
K
KAMEZAWA Hiroyuki 已提交
927

928 929 930 931 932 933 934 935
		rcu_read_lock();
		css = css_get_next(&mem_cgroup_subsys, id + 1, &root->css, &id);
		if (css) {
			if (css == &root->css || css_tryget(css))
				memcg = container_of(css,
						     struct mem_cgroup, css);
		} else
			id = 0;
K
KAMEZAWA Hiroyuki 已提交
936 937
		rcu_read_unlock();

938 939 940 941 942 943 944
		if (reclaim) {
			iter->position = id;
			if (!css)
				iter->generation++;
			else if (!prev && memcg)
				reclaim->generation = iter->generation;
		}
945 946 947 948 949

		if (prev && !css)
			return NULL;
	}
	return memcg;
K
KAMEZAWA Hiroyuki 已提交
950
}
K
KAMEZAWA Hiroyuki 已提交
951

952 953 954 955 956 957 958
/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
959 960 961 962 963 964
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}
K
KAMEZAWA Hiroyuki 已提交
965

966 967 968 969 970 971
/*
 * Iteration constructs for visiting all cgroups (under a tree).  If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
972
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
973
	     iter != NULL;				\
974
	     iter = mem_cgroup_iter(root, iter, NULL))
975

976
#define for_each_mem_cgroup(iter)			\
977
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
978
	     iter != NULL;				\
979
	     iter = mem_cgroup_iter(NULL, iter, NULL))
K
KAMEZAWA Hiroyuki 已提交
980

981
static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
982
{
983
	return (memcg == root_mem_cgroup);
984 985
}

986 987
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
988
	struct mem_cgroup *memcg;
989 990 991 992 993

	if (!mm)
		return;

	rcu_read_lock();
994 995
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (unlikely(!memcg))
996 997 998 999
		goto out;

	switch (idx) {
	case PGFAULT:
1000 1001 1002 1003
		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
		break;
	case PGMAJFAULT:
		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
1004 1005 1006 1007 1008 1009 1010 1011 1012
		break;
	default:
		BUG();
	}
out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(mem_cgroup_count_vm_event);

1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033
/**
 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
 * @zone: zone of the wanted lruvec
 * @mem: memcg of the wanted lruvec
 *
 * Returns the lru list vector holding pages for the given @zone and
 * @mem.  This can be the global zone lruvec, if the memory controller
 * is disabled.
 */
struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
				      struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_disabled())
		return &zone->lruvec;

	mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone));
	return &mz->lruvec;
}

K
KAMEZAWA Hiroyuki 已提交
1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046
/*
 * Following LRU functions are allowed to be used without PCG_LOCK.
 * Operations are called by routine of global LRU independently from memcg.
 * What we have to take care of here is validness of pc->mem_cgroup.
 *
 * Changes to pc->mem_cgroup happens when
 * 1. charge
 * 2. moving account
 * In typical case, "charge" is done before add-to-lru. Exception is SwapCache.
 * It is added to LRU before charge.
 * If PCG_USED bit is not set, page_cgroup is not added to this private LRU.
 * When moving account, the page is not on LRU. It's isolated.
 */
1047

1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061
/**
 * mem_cgroup_lru_add_list - account for adding an lru page and return lruvec
 * @zone: zone of the page
 * @page: the page
 * @lru: current lru
 *
 * This function accounts for @page being added to @lru, and returns
 * the lruvec for the given @zone and the memcg @page is charged to.
 *
 * The callsite is then responsible for physically linking the page to
 * the returned lruvec->lists[@lru].
 */
struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
				       enum lru_list lru)
K
KAMEZAWA Hiroyuki 已提交
1062 1063
{
	struct mem_cgroup_per_zone *mz;
1064 1065
	struct mem_cgroup *memcg;
	struct page_cgroup *pc;
1066

1067
	if (mem_cgroup_disabled())
1068 1069
		return &zone->lruvec;

K
KAMEZAWA Hiroyuki 已提交
1070
	pc = lookup_page_cgroup(page);
1071
	memcg = pc->mem_cgroup;
1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084

	/*
	 * Surreptitiously switch any uncharged page to root:
	 * an uncharged page off lru does nothing to secure
	 * its former mem_cgroup from sudden removal.
	 *
	 * Our caller holds lru_lock, and PageCgroupUsed is updated
	 * under page_cgroup lock: between them, they make all uses
	 * of pc->mem_cgroup safe.
	 */
	if (!PageCgroupUsed(pc) && memcg != root_mem_cgroup)
		pc->mem_cgroup = memcg = root_mem_cgroup;

1085 1086
	mz = page_cgroup_zoneinfo(memcg, page);
	/* compound_order() is stabilized through lru_lock */
1087
	mz->lru_size[lru] += 1 << compound_order(page);
1088
	return &mz->lruvec;
K
KAMEZAWA Hiroyuki 已提交
1089
}
1090

1091 1092 1093 1094 1095 1096 1097 1098 1099
/**
 * mem_cgroup_lru_del_list - account for removing an lru page
 * @page: the page
 * @lru: target lru
 *
 * This function accounts for @page being removed from @lru.
 *
 * The callsite is then responsible for physically unlinking
 * @page->lru.
1100
 */
1101
void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
1102 1103
{
	struct mem_cgroup_per_zone *mz;
1104
	struct mem_cgroup *memcg;
1105 1106 1107 1108 1109 1110
	struct page_cgroup *pc;

	if (mem_cgroup_disabled())
		return;

	pc = lookup_page_cgroup(page);
1111 1112
	memcg = pc->mem_cgroup;
	VM_BUG_ON(!memcg);
1113 1114
	mz = page_cgroup_zoneinfo(memcg, page);
	/* huge page split is done under lru_lock. so, we have no races. */
1115 1116
	VM_BUG_ON(mz->lru_size[lru] < (1 << compound_order(page)));
	mz->lru_size[lru] -= 1 << compound_order(page);
1117 1118
}

1119
void mem_cgroup_lru_del(struct page *page)
K
KAMEZAWA Hiroyuki 已提交
1120
{
1121
	mem_cgroup_lru_del_list(page, page_lru(page));
1122 1123
}

1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141
/**
 * mem_cgroup_lru_move_lists - account for moving a page between lrus
 * @zone: zone of the page
 * @page: the page
 * @from: current lru
 * @to: target lru
 *
 * This function accounts for @page being moved between the lrus @from
 * and @to, and returns the lruvec for the given @zone and the memcg
 * @page is charged to.
 *
 * The callsite is then responsible for physically relinking
 * @page->lru to the returned lruvec->lists[@to].
 */
struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
					 struct page *page,
					 enum lru_list from,
					 enum lru_list to)
1142
{
1143 1144 1145
	/* XXX: Optimize this, especially for @from == @to */
	mem_cgroup_lru_del_list(page, from);
	return mem_cgroup_lru_add_list(zone, page, to);
K
KAMEZAWA Hiroyuki 已提交
1146
}
1147

1148
/*
1149
 * Checks whether given mem is same or in the root_mem_cgroup's
1150 1151
 * hierarchy subtree
 */
1152 1153
static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
		struct mem_cgroup *memcg)
1154
{
1155 1156 1157
	if (root_memcg != memcg) {
		return (root_memcg->use_hierarchy &&
			css_is_ancestor(&memcg->css, &root_memcg->css));
1158 1159 1160 1161 1162
	}

	return true;
}

1163
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg)
1164 1165
{
	int ret;
1166
	struct mem_cgroup *curr = NULL;
1167
	struct task_struct *p;
1168

1169
	p = find_lock_task_mm(task);
1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184
	if (p) {
		curr = try_get_mem_cgroup_from_mm(p->mm);
		task_unlock(p);
	} else {
		/*
		 * All threads may have already detached their mm's, but the oom
		 * killer still needs to detect if they have already been oom
		 * killed to prevent needlessly killing additional tasks.
		 */
		task_lock(task);
		curr = mem_cgroup_from_task(task);
		if (curr)
			css_get(&curr->css);
		task_unlock(task);
	}
1185 1186
	if (!curr)
		return 0;
1187
	/*
1188
	 * We should check use_hierarchy of "memcg" not "curr". Because checking
1189
	 * use_hierarchy of "curr" here make this function true if hierarchy is
1190 1191
	 * enabled in "curr" and "curr" is a child of "memcg" in *cgroup*
	 * hierarchy(even if use_hierarchy is disabled in "memcg").
1192
	 */
1193
	ret = mem_cgroup_same_or_subtree(memcg, curr);
1194
	css_put(&curr->css);
1195 1196 1197
	return ret;
}

1198
int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
1199
{
1200 1201 1202
	unsigned long inactive_ratio;
	int nid = zone_to_nid(zone);
	int zid = zone_idx(zone);
1203
	unsigned long inactive;
1204
	unsigned long active;
1205
	unsigned long gb;
1206

1207 1208 1209 1210
	inactive = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
						BIT(LRU_INACTIVE_ANON));
	active = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
					      BIT(LRU_ACTIVE_ANON));
1211

1212 1213 1214 1215 1216 1217
	gb = (inactive + active) >> (30 - PAGE_SHIFT);
	if (gb)
		inactive_ratio = int_sqrt(10 * gb);
	else
		inactive_ratio = 1;

1218
	return inactive * inactive_ratio < active;
1219 1220
}

1221
int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, struct zone *zone)
1222 1223 1224
{
	unsigned long active;
	unsigned long inactive;
1225 1226
	int zid = zone_idx(zone);
	int nid = zone_to_nid(zone);
1227

1228 1229 1230 1231
	inactive = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
						BIT(LRU_INACTIVE_FILE));
	active = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
					      BIT(LRU_ACTIVE_FILE));
1232 1233 1234 1235

	return (active > inactive);
}

K
KOSAKI Motohiro 已提交
1236 1237 1238
struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
						      struct zone *zone)
{
1239
	int nid = zone_to_nid(zone);
K
KOSAKI Motohiro 已提交
1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);

	return &mz->reclaim_stat;
}

struct zone_reclaim_stat *
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_disabled())
		return NULL;

	pc = lookup_page_cgroup(page);
1256 1257
	if (!PageCgroupUsed(pc))
		return NULL;
1258 1259
	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
	smp_rmb();
1260
	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
K
KOSAKI Motohiro 已提交
1261 1262 1263
	return &mz->reclaim_stat;
}

1264 1265 1266
#define mem_cgroup_from_res_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

1267
/**
1268 1269
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @mem: the memory cgroup
1270
 *
1271
 * Returns the maximum amount of memory @mem can be charged with, in
1272
 * pages.
1273
 */
1274
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1275
{
1276 1277
	unsigned long long margin;

1278
	margin = res_counter_margin(&memcg->res);
1279
	if (do_swap_account)
1280
		margin = min(margin, res_counter_margin(&memcg->memsw));
1281
	return margin >> PAGE_SHIFT;
1282 1283
}

1284
int mem_cgroup_swappiness(struct mem_cgroup *memcg)
K
KOSAKI Motohiro 已提交
1285 1286 1287 1288 1289 1290 1291
{
	struct cgroup *cgrp = memcg->css.cgroup;

	/* root ? */
	if (cgrp->parent == NULL)
		return vm_swappiness;

1292
	return memcg->swappiness;
K
KOSAKI Motohiro 已提交
1293 1294
}

1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308
/*
 * memcg->moving_account is used for checking possibility that some thread is
 * calling move_account(). When a thread on CPU-A starts moving pages under
 * a memcg, other threads should check memcg->moving_account under
 * rcu_read_lock(), like this:
 *
 *         CPU-A                                    CPU-B
 *                                              rcu_read_lock()
 *         memcg->moving_account+1              if (memcg->mocing_account)
 *                                                   take heavy locks.
 *         synchronize_rcu()                    update something.
 *                                              rcu_read_unlock()
 *         start move here.
 */
1309 1310 1311 1312

/* for quick checking without looking up memcg */
atomic_t memcg_moving __read_mostly;

1313
static void mem_cgroup_start_move(struct mem_cgroup *memcg)
1314
{
1315
	atomic_inc(&memcg_moving);
1316
	atomic_inc(&memcg->moving_account);
1317 1318 1319
	synchronize_rcu();
}

1320
static void mem_cgroup_end_move(struct mem_cgroup *memcg)
1321
{
1322 1323 1324 1325
	/*
	 * Now, mem_cgroup_clear_mc() may call this function with NULL.
	 * We check NULL in callee rather than caller.
	 */
1326 1327
	if (memcg) {
		atomic_dec(&memcg_moving);
1328
		atomic_dec(&memcg->moving_account);
1329
	}
1330
}
1331

1332 1333 1334
/*
 * 2 routines for checking "mem" is under move_account() or not.
 *
1335 1336
 * mem_cgroup_stolen() -  checking whether a cgroup is mc.from or not. This
 *			  is used for avoiding races in accounting.  If true,
1337 1338 1339 1340 1341 1342 1343
 *			  pc->mem_cgroup may be overwritten.
 *
 * mem_cgroup_under_move() - checking a cgroup is mc.from or mc.to or
 *			  under hierarchy of moving cgroups. This is for
 *			  waiting at hith-memory prressure caused by "move".
 */

1344
static bool mem_cgroup_stolen(struct mem_cgroup *memcg)
1345 1346
{
	VM_BUG_ON(!rcu_read_lock_held());
1347
	return atomic_read(&memcg->moving_account) > 0;
1348
}
1349

1350
static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1351
{
1352 1353
	struct mem_cgroup *from;
	struct mem_cgroup *to;
1354
	bool ret = false;
1355 1356 1357 1358 1359 1360 1361 1362 1363
	/*
	 * Unlike task_move routines, we access mc.to, mc.from not under
	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;
1364

1365 1366
	ret = mem_cgroup_same_or_subtree(memcg, from)
		|| mem_cgroup_same_or_subtree(memcg, to);
1367 1368
unlock:
	spin_unlock(&mc.lock);
1369 1370 1371
	return ret;
}

1372
static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1373 1374
{
	if (mc.moving_task && current != mc.moving_task) {
1375
		if (mem_cgroup_under_move(memcg)) {
1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}

1388 1389 1390 1391
/*
 * Take this lock when
 * - a code tries to modify page's memcg while it's USED.
 * - a code tries to modify page state accounting in a memcg.
1392
 * see mem_cgroup_stolen(), too.
1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405
 */
static void move_lock_mem_cgroup(struct mem_cgroup *memcg,
				  unsigned long *flags)
{
	spin_lock_irqsave(&memcg->move_lock, *flags);
}

static void move_unlock_mem_cgroup(struct mem_cgroup *memcg,
				unsigned long *flags)
{
	spin_unlock_irqrestore(&memcg->move_lock, *flags);
}

1406
/**
1407
 * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
	struct cgroup *task_cgrp;
	struct cgroup *mem_cgrp;
	/*
	 * Need a buffer in BSS, can't rely on allocations. The code relies
	 * on the assumption that OOM is serialized for memory controller.
	 * If this assumption is broken, revisit this code.
	 */
	static char memcg_name[PATH_MAX];
	int ret;

1426
	if (!memcg || !p)
1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471
		return;

	rcu_read_lock();

	mem_cgrp = memcg->css.cgroup;
	task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);

	ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
	if (ret < 0) {
		/*
		 * Unfortunately, we are unable to convert to a useful name
		 * But we'll still print out the usage information
		 */
		rcu_read_unlock();
		goto done;
	}
	rcu_read_unlock();

	printk(KERN_INFO "Task in %s killed", memcg_name);

	rcu_read_lock();
	ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
	if (ret < 0) {
		rcu_read_unlock();
		goto done;
	}
	rcu_read_unlock();

	/*
	 * Continues from above, so we don't need an KERN_ level
	 */
	printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
done:

	printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
		res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
		res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
		res_counter_read_u64(&memcg->res, RES_FAILCNT));
	printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
		"failcnt %llu\n",
		res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
		res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
		res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
}

1472 1473 1474 1475
/*
 * This function returns the number of memcg under hierarchy tree. Returns
 * 1(self count) if no children.
 */
1476
static int mem_cgroup_count_children(struct mem_cgroup *memcg)
1477 1478
{
	int num = 0;
K
KAMEZAWA Hiroyuki 已提交
1479 1480
	struct mem_cgroup *iter;

1481
	for_each_mem_cgroup_tree(iter, memcg)
K
KAMEZAWA Hiroyuki 已提交
1482
		num++;
1483 1484 1485
	return num;
}

D
David Rientjes 已提交
1486 1487 1488 1489 1490 1491 1492 1493
/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
	u64 limit;
	u64 memsw;

1494 1495 1496
	limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
	limit += total_swap_pages << PAGE_SHIFT;

D
David Rientjes 已提交
1497 1498 1499 1500 1501 1502 1503 1504
	memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
	/*
	 * If memsw is finite and limits the amount of swap space available
	 * to this memcg, return that limit.
	 */
	return min(limit, memsw);
}

1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540
static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg,
					gfp_t gfp_mask,
					unsigned long flags)
{
	unsigned long total = 0;
	bool noswap = false;
	int loop;

	if (flags & MEM_CGROUP_RECLAIM_NOSWAP)
		noswap = true;
	if (!(flags & MEM_CGROUP_RECLAIM_SHRINK) && memcg->memsw_is_minimum)
		noswap = true;

	for (loop = 0; loop < MEM_CGROUP_MAX_RECLAIM_LOOPS; loop++) {
		if (loop)
			drain_all_stock_async(memcg);
		total += try_to_free_mem_cgroup_pages(memcg, gfp_mask, noswap);
		/*
		 * Allow limit shrinkers, which are triggered directly
		 * by userspace, to catch signals and stop reclaim
		 * after minimal progress, regardless of the margin.
		 */
		if (total && (flags & MEM_CGROUP_RECLAIM_SHRINK))
			break;
		if (mem_cgroup_margin(memcg))
			break;
		/*
		 * If nothing was reclaimed after two attempts, there
		 * may be no reclaimable pages in this hierarchy.
		 */
		if (loop && !total)
			break;
	}
	return total;
}

1541 1542 1543 1544 1545 1546 1547 1548 1549 1550
/**
 * test_mem_cgroup_node_reclaimable
 * @mem: the target memcg
 * @nid: the node ID to be checked.
 * @noswap : specify true here if the user wants flle only information.
 *
 * This function returns whether the specified memcg contains any
 * reclaimable pages on a node. Returns true if there are any reclaimable
 * pages in the node.
 */
1551
static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
1552 1553
		int nid, bool noswap)
{
1554
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
1555 1556 1557
		return true;
	if (noswap || !total_swap_pages)
		return false;
1558
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
1559 1560 1561 1562
		return true;
	return false;

}
1563 1564 1565 1566 1567 1568 1569 1570
#if MAX_NUMNODES > 1

/*
 * Always updating the nodemask is not very good - even if we have an empty
 * list or the wrong list here, we can start from some node and traverse all
 * nodes based on the zonelist. So update the list loosely once per 10 secs.
 *
 */
1571
static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
1572 1573
{
	int nid;
1574 1575 1576 1577
	/*
	 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
	 * pagein/pageout changes since the last update.
	 */
1578
	if (!atomic_read(&memcg->numainfo_events))
1579
		return;
1580
	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
1581 1582 1583
		return;

	/* make a nodemask where this memcg uses memory from */
1584
	memcg->scan_nodes = node_states[N_HIGH_MEMORY];
1585 1586 1587

	for_each_node_mask(nid, node_states[N_HIGH_MEMORY]) {

1588 1589
		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
			node_clear(nid, memcg->scan_nodes);
1590
	}
1591

1592 1593
	atomic_set(&memcg->numainfo_events, 0);
	atomic_set(&memcg->numainfo_updating, 0);
1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607
}

/*
 * Selecting a node where we start reclaim from. Because what we need is just
 * reducing usage counter, start from anywhere is O,K. Considering
 * memory reclaim from current node, there are pros. and cons.
 *
 * Freeing memory from current node means freeing memory from a node which
 * we'll use or we've used. So, it may make LRU bad. And if several threads
 * hit limits, it will see a contention on a node. But freeing from remote
 * node means more costs for memory reclaim because of memory latency.
 *
 * Now, we use round-robin. Better algorithm is welcomed.
 */
1608
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1609 1610 1611
{
	int node;

1612 1613
	mem_cgroup_may_update_nodemask(memcg);
	node = memcg->last_scanned_node;
1614

1615
	node = next_node(node, memcg->scan_nodes);
1616
	if (node == MAX_NUMNODES)
1617
		node = first_node(memcg->scan_nodes);
1618 1619 1620 1621 1622 1623 1624 1625 1626
	/*
	 * We call this when we hit limit, not when pages are added to LRU.
	 * No LRU may hold pages because all pages are UNEVICTABLE or
	 * memcg is too small and all pages are not on LRU. In that case,
	 * we use curret node.
	 */
	if (unlikely(node == MAX_NUMNODES))
		node = numa_node_id();

1627
	memcg->last_scanned_node = node;
1628 1629 1630
	return node;
}

1631 1632 1633 1634 1635 1636
/*
 * Check all nodes whether it contains reclaimable pages or not.
 * For quick scan, we make use of scan_nodes. This will allow us to skip
 * unused nodes. But scan_nodes is lazily updated and may not cotain
 * enough new information. We need to do double check.
 */
1637
bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
1638 1639 1640 1641 1642 1643 1644
{
	int nid;

	/*
	 * quick check...making use of scan_node.
	 * We can skip unused nodes.
	 */
1645 1646
	if (!nodes_empty(memcg->scan_nodes)) {
		for (nid = first_node(memcg->scan_nodes);
1647
		     nid < MAX_NUMNODES;
1648
		     nid = next_node(nid, memcg->scan_nodes)) {
1649

1650
			if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
1651 1652 1653 1654 1655 1656 1657
				return true;
		}
	}
	/*
	 * Check rest of nodes.
	 */
	for_each_node_state(nid, N_HIGH_MEMORY) {
1658
		if (node_isset(nid, memcg->scan_nodes))
1659
			continue;
1660
		if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
1661 1662 1663 1664 1665
			return true;
	}
	return false;
}

1666
#else
1667
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1668 1669 1670
{
	return 0;
}
1671

1672
bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
1673
{
1674
	return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
1675
}
1676 1677
#endif

1678 1679 1680 1681
static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
				   struct zone *zone,
				   gfp_t gfp_mask,
				   unsigned long *total_scanned)
1682
{
1683
	struct mem_cgroup *victim = NULL;
1684
	int total = 0;
K
KAMEZAWA Hiroyuki 已提交
1685
	int loop = 0;
1686
	unsigned long excess;
1687
	unsigned long nr_scanned;
1688 1689 1690 1691
	struct mem_cgroup_reclaim_cookie reclaim = {
		.zone = zone,
		.priority = 0,
	};
1692

1693
	excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT;
K
KAMEZAWA Hiroyuki 已提交
1694

1695
	while (1) {
1696
		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1697
		if (!victim) {
K
KAMEZAWA Hiroyuki 已提交
1698
			loop++;
1699 1700 1701 1702 1703 1704
			if (loop >= 2) {
				/*
				 * If we have not been able to reclaim
				 * anything, it might because there are
				 * no reclaimable pages under this hierarchy
				 */
1705
				if (!total)
1706 1707
					break;
				/*
L
Lucas De Marchi 已提交
1708
				 * We want to do more targeted reclaim.
1709 1710 1711 1712 1713
				 * excess >> 2 is not to excessive so as to
				 * reclaim too much, nor too less that we keep
				 * coming back to reclaim from this cgroup
				 */
				if (total >= (excess >> 2) ||
1714
					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1715 1716
					break;
			}
1717
			continue;
1718
		}
1719
		if (!mem_cgroup_reclaimable(victim, false))
1720
			continue;
1721 1722 1723 1724
		total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
						     zone, &nr_scanned);
		*total_scanned += nr_scanned;
		if (!res_counter_soft_limit_excess(&root_memcg->res))
1725
			break;
1726
	}
1727
	mem_cgroup_iter_break(root_memcg, victim);
K
KAMEZAWA Hiroyuki 已提交
1728
	return total;
1729 1730
}

/*
 * Check whether the OOM killer is already running under our hierarchy.
 * If someone is running, return false.
 * Has to be called with memcg_oom_lock held.
 */
static bool mem_cgroup_oom_lock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter, *failed = NULL;

	for_each_mem_cgroup_tree(iter, memcg) {
		if (iter->oom_lock) {
			/*
			 * this subtree of our hierarchy is already locked,
			 * so we cannot grant the lock.
			 */
			failed = iter;
			mem_cgroup_iter_break(memcg, iter);
			break;
		} else
			iter->oom_lock = true;
	}

	if (!failed)
		return true;

	/*
	 * OK, we failed to lock the whole subtree so we have to clean up
	 * what we set up before hitting the failing subtree.
	 */
	for_each_mem_cgroup_tree(iter, memcg) {
		if (iter == failed) {
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
		iter->oom_lock = false;
	}
	return false;
}
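
/*
 * Illustrative pairing sketch (editorial, not part of the original file):
 * callers such as mem_cgroup_handle_oom() below take and release the
 * hierarchy lock under the memcg_oom_lock spinlock, roughly:
 *
 *	spin_lock(&memcg_oom_lock);
 *	locked = mem_cgroup_oom_lock(memcg);
 *	...
 *	spin_unlock(&memcg_oom_lock);
 *	...
 *	spin_lock(&memcg_oom_lock);
 *	if (locked)
 *		mem_cgroup_oom_unlock(memcg);
 *	spin_unlock(&memcg_oom_lock);
 */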

/*
 * Has to be called with memcg_oom_lock held.
 */
static int mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg)
		iter->oom_lock = false;
	return 0;
}

static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg)
		atomic_inc(&iter->under_oom);
}

static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	/*
	 * When a new child is created while the hierarchy is under oom,
	 * mem_cgroup_oom_lock() may not be called. We have to use
	 * atomic_add_unless() here.
	 */
	for_each_mem_cgroup_tree(iter, memcg)
		atomic_add_unless(&iter->under_oom, -1, 0);
}

static DEFINE_SPINLOCK(memcg_oom_lock);
static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);

struct oom_wait_info {
	struct mem_cgroup *memcg;
	wait_queue_t	wait;
};

static int memcg_oom_wake_function(wait_queue_t *wait,
	unsigned mode, int sync, void *arg)
{
	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
	struct mem_cgroup *oom_wait_memcg;
	struct oom_wait_info *oom_wait_info;

	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
	oom_wait_memcg = oom_wait_info->memcg;

	/*
	 * Both oom_wait_info->memcg and wake_memcg are stable under us.
	 * Then we can use css_is_ancestor without taking care of RCU.
	 */
	if (!mem_cgroup_same_or_subtree(oom_wait_memcg, wake_memcg)
		&& !mem_cgroup_same_or_subtree(wake_memcg, oom_wait_memcg))
		return 0;
	return autoremove_wake_function(wait, mode, sync, arg);
}

static void memcg_wakeup_oom(struct mem_cgroup *memcg)
{
	/* for filtering, pass "memcg" as argument. */
	__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
}

static void memcg_oom_recover(struct mem_cgroup *memcg)
{
	if (memcg && atomic_read(&memcg->under_oom))
		memcg_wakeup_oom(memcg);
}

/*
 * Try to call the OOM killer. Returns false if we should exit the
 * memory-reclaim loop.
 */
bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
{
	struct oom_wait_info owait;
	bool locked, need_to_kill;

	owait.memcg = memcg;
	owait.wait.flags = 0;
	owait.wait.func = memcg_oom_wake_function;
	owait.wait.private = current;
	INIT_LIST_HEAD(&owait.wait.task_list);
	need_to_kill = true;
	mem_cgroup_mark_under_oom(memcg);

	/* At first, try to OOM-lock the hierarchy under memcg. */
	spin_lock(&memcg_oom_lock);
	locked = mem_cgroup_oom_lock(memcg);
	/*
	 * Even if signal_pending(), we can't quit the charge() loop without
	 * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL
	 * under OOM is always welcome, so use TASK_KILLABLE here.
	 */
	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
	if (!locked || memcg->oom_kill_disable)
		need_to_kill = false;
	if (locked)
		mem_cgroup_oom_notify(memcg);
	spin_unlock(&memcg_oom_lock);

	if (need_to_kill) {
		finish_wait(&memcg_oom_waitq, &owait.wait);
		mem_cgroup_out_of_memory(memcg, mask, order);
	} else {
		schedule();
		finish_wait(&memcg_oom_waitq, &owait.wait);
	}
	spin_lock(&memcg_oom_lock);
	if (locked)
		mem_cgroup_oom_unlock(memcg);
	memcg_wakeup_oom(memcg);
	spin_unlock(&memcg_oom_lock);

	mem_cgroup_unmark_under_oom(memcg);

	if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
		return false;
	/* Give a chance to the dying process */
	schedule_timeout_uninterruptible(1);
	return true;
}
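
/*
 * Caller-side sketch (editorial note): mem_cgroup_do_charge(), defined later
 * in this file, reacts to the return value roughly like
 *
 *	if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask, get_order(csize)))
 *		return CHARGE_OOM_DIE;	// current was killed, give up
 *	return CHARGE_RETRY;		// waited or killed someone else, retry
 */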

/*
 * Currently used to update mapped file statistics, but the routine can be
 * generalized to update other statistics as well.
 *
 * Notes: Race condition
 *
 * We usually use page_cgroup_lock() for accessing page_cgroup members but
 * it tends to be costly. Considering some conditions, we don't need
 * to do so _always_.
 *
 * Considering "charge", lock_page_cgroup() is not required because all
 * file-stat operations happen after a page is attached to radix-tree. There
 * is no race with "charge".
 *
 * Considering "uncharge", we know that memcg doesn't clear pc->mem_cgroup
 * at "uncharge" intentionally. So, we always see a valid pc->mem_cgroup even
 * if there is a race with "uncharge". Statistics itself is properly handled
 * by flags.
 *
 * Considering "move", this is the only case where we see a race. To make the
 * race small, we check mm->moving_account and detect the possibility of a
 * race. If there is, we take a lock.
 */

void __mem_cgroup_begin_update_page_stat(struct page *page,
				bool *locked, unsigned long *flags)
{
	struct mem_cgroup *memcg;
	struct page_cgroup *pc;

	pc = lookup_page_cgroup(page);
again:
	memcg = pc->mem_cgroup;
	if (unlikely(!memcg || !PageCgroupUsed(pc)))
		return;
	/*
	 * If this memory cgroup is not under account moving, we don't
	 * need to take move_lock_page_cgroup(). Because we already hold
	 * rcu_read_lock(), any calls to move_account will be delayed until
	 * rcu_read_unlock() if mem_cgroup_stolen() == true.
	 */
	if (!mem_cgroup_stolen(memcg))
		return;

	move_lock_mem_cgroup(memcg, flags);
	if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) {
		move_unlock_mem_cgroup(memcg, flags);
		goto again;
	}
	*locked = true;
}

void __mem_cgroup_end_update_page_stat(struct page *page, unsigned long *flags)
{
	struct page_cgroup *pc = lookup_page_cgroup(page);

	/*
	 * It's guaranteed that pc->mem_cgroup never changes while the
	 * lock is held because a routine that modifies pc->mem_cgroup
	 * should take move_lock_page_cgroup().
	 */
	move_unlock_mem_cgroup(pc->mem_cgroup, flags);
}

void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_page_stat_item idx, int val)
{
	struct mem_cgroup *memcg;
	struct page_cgroup *pc = lookup_page_cgroup(page);
	unsigned long uninitialized_var(flags);

	if (mem_cgroup_disabled())
		return;

	memcg = pc->mem_cgroup;
	if (unlikely(!memcg || !PageCgroupUsed(pc)))
		return;

	switch (idx) {
	case MEMCG_NR_FILE_MAPPED:
		idx = MEM_CGROUP_STAT_FILE_MAPPED;
		break;
	default:
		BUG();
	}

	this_cpu_add(memcg->stat->count[idx], val);
}
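
/*
 * Usage sketch (editorial, simplified view): a caller is expected to bracket
 * the stat update with the begin/end helpers above so that a concurrent
 * move_account cannot move the page while the counter is adjusted:
 *
 *	bool locked = false;
 *	unsigned long flags;
 *
 *	__mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 *	mem_cgroup_update_page_stat(page, MEMCG_NR_FILE_MAPPED, 1);
 *	if (locked)
 *		__mem_cgroup_end_update_page_stat(page, &flags);
 *
 * The in-tree callers typically go through wrapper helpers that also handle
 * mem_cgroup_disabled() and RCU; treat the above as an assumption-laden
 * illustration rather than the exact call sites.
 */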

/*
 * size of first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: maybe necessary to use big numbers in big irons.
 */
#define CHARGE_BATCH	32U
struct memcg_stock_pcp {
	struct mem_cgroup *cached; /* this is never the root cgroup */
	unsigned int nr_pages;
	struct work_struct work;
	unsigned long flags;
#define FLUSHING_CACHED_CHARGE	(0)
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
static DEFINE_MUTEX(percpu_charge_mutex);

/*
 * Try to consume stocked charge on this cpu. On success, one page is consumed
 * from the local stock and true is returned. If the stock is 0 or holds
 * charges from a cgroup which is not the current target, returns false.
 * This stock will be refilled.
 */
static bool consume_stock(struct mem_cgroup *memcg)
{
	struct memcg_stock_pcp *stock;
	bool ret = true;

	stock = &get_cpu_var(memcg_stock);
	if (memcg == stock->cached && stock->nr_pages)
		stock->nr_pages--;
	else /* need to call res_counter_charge */
		ret = false;
	put_cpu_var(memcg_stock);
	return ret;
}

/*
 * Return stock cached in percpu to res_counter and reset cached information.
 */
static void drain_stock(struct memcg_stock_pcp *stock)
{
	struct mem_cgroup *old = stock->cached;

	if (stock->nr_pages) {
		unsigned long bytes = stock->nr_pages * PAGE_SIZE;

		res_counter_uncharge(&old->res, bytes);
		if (do_swap_account)
			res_counter_uncharge(&old->memsw, bytes);
		stock->nr_pages = 0;
	}
	stock->cached = NULL;
}

/*
 * This must be called with preemption disabled or by a thread which is
 * pinned to the local cpu.
 */
static void drain_local_stock(struct work_struct *dummy)
{
	struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
	drain_stock(stock);
	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
}

/*
 * Cache charges (nr_pages), taken from res_counter, in the local per-cpu area.
 * This will be consumed by the consume_stock() function, later.
 */
static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);

	if (stock->cached != memcg) { /* reset if necessary */
		drain_stock(stock);
		stock->cached = memcg;
	}
	stock->nr_pages += nr_pages;
	put_cpu_var(memcg_stock);
}
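
/*
 * Editorial sketch of the per-cpu stock flow (names as defined above): the
 * charge path over-charges res_counter by CHARGE_BATCH pages and parks the
 * surplus on this cpu, so later single-page charges can be served without
 * touching the shared counter. __mem_cgroup_try_charge() below does roughly:
 *
 *	if (nr_pages == 1 && consume_stock(memcg))
 *		goto done;			// served from the local stock
 *	...
 *	ret = mem_cgroup_do_charge(memcg, gfp_mask, batch, oom_check);
 *	...
 *	if (batch > nr_pages)
 *		refill_stock(memcg, batch - nr_pages);	// park the surplus
 */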

/*
 * Drains all per-CPU charge caches for the given root_memcg resp. the subtree
 * of the hierarchy under it. The sync flag says whether we should block
 * until the work is done.
 */
static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
{
	int cpu, curcpu;

	/* Notify other cpus that system-wide "drain" is running */
	get_online_cpus();
	curcpu = get_cpu();
	for_each_online_cpu(cpu) {
		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
		struct mem_cgroup *memcg;

		memcg = stock->cached;
		if (!memcg || !stock->nr_pages)
			continue;
		if (!mem_cgroup_same_or_subtree(root_memcg, memcg))
			continue;
		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
			if (cpu == curcpu)
				drain_local_stock(&stock->work);
			else
				schedule_work_on(cpu, &stock->work);
		}
	}
	put_cpu();

	if (!sync)
		goto out;

	for_each_online_cpu(cpu) {
		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
		if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
			flush_work(&stock->work);
	}
out:
	put_online_cpus();
}

/*
 * Tries to drain stocked charges on other cpus. This function is asynchronous
 * and just puts a work item per cpu for draining locally on each cpu. The
 * caller can expect some charges to come back to res_counter later but cannot
 * wait for it.
 */
static void drain_all_stock_async(struct mem_cgroup *root_memcg)
{
	/*
	 * If someone calls draining, avoid adding more kworker runs.
	 */
	if (!mutex_trylock(&percpu_charge_mutex))
		return;
	drain_all_stock(root_memcg, false);
	mutex_unlock(&percpu_charge_mutex);
}

/* This is a synchronous drain interface. */
static void drain_all_stock_sync(struct mem_cgroup *root_memcg)
{
	/* called when force_empty is called */
	mutex_lock(&percpu_charge_mutex);
	drain_all_stock(root_memcg, true);
	mutex_unlock(&percpu_charge_mutex);
}

/*
 * This function drains the percpu counter values from a DEAD cpu and
 * moves them to the local cpu. Note that this function can be preempted.
 */
static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
{
	int i;

	spin_lock(&memcg->pcp_counter_lock);
	for (i = 0; i < MEM_CGROUP_STAT_DATA; i++) {
		long x = per_cpu(memcg->stat->count[i], cpu);

		per_cpu(memcg->stat->count[i], cpu) = 0;
		memcg->nocpu_base.count[i] += x;
	}
	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
		unsigned long x = per_cpu(memcg->stat->events[i], cpu);

		per_cpu(memcg->stat->events[i], cpu) = 0;
		memcg->nocpu_base.events[i] += x;
	}
	spin_unlock(&memcg->pcp_counter_lock);
}

static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
					unsigned long action,
					void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	struct memcg_stock_pcp *stock;
	struct mem_cgroup *iter;

	if (action == CPU_ONLINE)
		return NOTIFY_OK;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	for_each_mem_cgroup(iter)
		mem_cgroup_drain_pcp_counter(iter, cpu);

	stock = &per_cpu(memcg_stock, cpu);
	drain_stock(stock);
	return NOTIFY_OK;
}


/* See __mem_cgroup_try_charge() for details */
enum {
	CHARGE_OK,		/* success */
	CHARGE_RETRY,		/* need to retry but retry is not bad */
	CHARGE_NOMEM,		/* we can't do more. return -ENOMEM */
	CHARGE_WOULDBLOCK,	/* __GFP_WAIT wasn't set and not enough res. */
	CHARGE_OOM_DIE,		/* the current task is killed because of OOM */
};

static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
				unsigned int nr_pages, bool oom_check)
{
	unsigned long csize = nr_pages * PAGE_SIZE;
	struct mem_cgroup *mem_over_limit;
	struct res_counter *fail_res;
	unsigned long flags = 0;
	int ret;

	ret = res_counter_charge(&memcg->res, csize, &fail_res);

	if (likely(!ret)) {
		if (!do_swap_account)
			return CHARGE_OK;
		ret = res_counter_charge(&memcg->memsw, csize, &fail_res);
		if (likely(!ret))
			return CHARGE_OK;

		res_counter_uncharge(&memcg->res, csize);
		mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
		flags |= MEM_CGROUP_RECLAIM_NOSWAP;
	} else
		mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
	/*
	 * nr_pages can be either a huge page (HPAGE_PMD_NR), a batch
	 * of regular pages (CHARGE_BATCH), or a single regular page (1).
	 *
	 * Never reclaim on behalf of optional batching, retry with a
	 * single page instead.
	 */
	if (nr_pages == CHARGE_BATCH)
		return CHARGE_RETRY;

	if (!(gfp_mask & __GFP_WAIT))
		return CHARGE_WOULDBLOCK;

	ret = mem_cgroup_reclaim(mem_over_limit, gfp_mask, flags);
	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
		return CHARGE_RETRY;
	/*
	 * Even though the limit is exceeded at this point, reclaim
	 * may have been able to free some pages.  Retry the charge
	 * before killing the task.
	 *
	 * Only for regular pages, though: huge pages are rather
	 * unlikely to succeed so close to the limit, and we fall back
	 * to regular pages anyway in case of failure.
	 */
	if (nr_pages == 1 && ret)
		return CHARGE_RETRY;

	/*
	 * At task move, charge accounts can be doubly counted. So, it's
	 * better to wait until the end of task_move if something is going on.
	 */
	if (mem_cgroup_wait_acct_move(mem_over_limit))
		return CHARGE_RETRY;

	/* If we don't need to call the oom-killer at all, return immediately */
	if (!oom_check)
		return CHARGE_NOMEM;
	/* check OOM */
	if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask, get_order(csize)))
		return CHARGE_OOM_DIE;

	return CHARGE_RETRY;
}

/*
 * __mem_cgroup_try_charge() does
 * 1. detect memcg to be charged against from passed *mm and *ptr,
 * 2. update res_counter
 * 3. call memory reclaim if necessary.
 *
 * In some special cases, if the task is dying (fatal_signal_pending() or
 * TIF_MEMDIE set), this function returns -EINTR while writing root_mem_cgroup
 * to *ptr. There are two reasons for this. 1: fatal threads should quit as soon
 * as possible without any hazards. 2: all pages should have a valid
 * pc->mem_cgroup. If mm is NULL and the caller doesn't pass a valid memcg
 * pointer, that is treated as a charge to root_mem_cgroup.
 *
 * So __mem_cgroup_try_charge() will return
 *  0       ...  on success, filling *ptr with a valid memcg pointer.
 *  -ENOMEM ...  charge failure because of resource limits.
 *  -EINTR  ...  if the thread is dying. *ptr is filled with root_mem_cgroup.
 *
 * Unlike the exported interface, an "oom" parameter is added. If oom==true,
 * the oom-killer can be invoked.
 */
static int __mem_cgroup_try_charge(struct mm_struct *mm,
				   gfp_t gfp_mask,
				   unsigned int nr_pages,
				   struct mem_cgroup **ptr,
				   bool oom)
{
	unsigned int batch = max(CHARGE_BATCH, nr_pages);
	int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct mem_cgroup *memcg = NULL;
	int ret;

	/*
	 * Unlike the global VM's OOM-kill, we're not in a memory shortage
	 * at the system level. So, allow the dying process to go ahead,
	 * in addition to the MEMDIE process.
	 */
	if (unlikely(test_thread_flag(TIF_MEMDIE)
		     || fatal_signal_pending(current)))
		goto bypass;

	/*
	 * We always charge the cgroup the mm_struct belongs to.
	 * The mm_struct's mem_cgroup changes on task migration if the
	 * thread group leader migrates. It's possible that mm is not
	 * set, if so charge the init_mm (happens for pagecache usage).
	 */
	if (!*ptr && !mm)
		*ptr = root_mem_cgroup;
again:
	if (*ptr) { /* css should be a valid one */
		memcg = *ptr;
		VM_BUG_ON(css_is_removed(&memcg->css));
		if (mem_cgroup_is_root(memcg))
			goto done;
		if (nr_pages == 1 && consume_stock(memcg))
			goto done;
		css_get(&memcg->css);
	} else {
		struct task_struct *p;

		rcu_read_lock();
		p = rcu_dereference(mm->owner);
		/*
		 * Because we don't have task_lock(), "p" can exit.
		 * In that case, "memcg" can point to root or p can be NULL with
		 * a race with swapoff. Then, we have a small risk of
		 * mis-accounting. But such mis-accounting by a race always
		 * happens because we don't have cgroup_mutex(). It's overkill
		 * to avoid, and we allow that small race, here.
		 * (*) swapoff etc. will charge against the mm-struct, not
		 * against the task-struct. So, mm->owner can be NULL.
		 */
		memcg = mem_cgroup_from_task(p);
		if (!memcg)
			memcg = root_mem_cgroup;
		if (mem_cgroup_is_root(memcg)) {
			rcu_read_unlock();
			goto done;
		}
		if (nr_pages == 1 && consume_stock(memcg)) {
			/*
			 * It seems dangerous to access memcg without css_get().
			 * But considering how consume_stock works, it's not
			 * necessary. If consume_stock succeeds, some charges
			 * from this memcg are cached on this cpu. So, we
			 * don't need to call css_get()/css_tryget() before
			 * calling consume_stock().
			 */
			rcu_read_unlock();
			goto done;
		}
		/* after here, we may be blocked. we need to get refcnt */
		if (!css_tryget(&memcg->css)) {
			rcu_read_unlock();
			goto again;
		}
		rcu_read_unlock();
	}

	do {
		bool oom_check;

		/* If killed, bypass charge */
		if (fatal_signal_pending(current)) {
			css_put(&memcg->css);
			goto bypass;
		}

		oom_check = false;
		if (oom && !nr_oom_retries) {
			oom_check = true;
			nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
		}

		ret = mem_cgroup_do_charge(memcg, gfp_mask, batch, oom_check);
		switch (ret) {
		case CHARGE_OK:
			break;
		case CHARGE_RETRY: /* not in OOM situation but retry */
			batch = nr_pages;
			css_put(&memcg->css);
			memcg = NULL;
			goto again;
		case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
			css_put(&memcg->css);
			goto nomem;
		case CHARGE_NOMEM: /* OOM routine works */
			if (!oom) {
				css_put(&memcg->css);
				goto nomem;
			}
			/* If oom, we never return -ENOMEM */
			nr_oom_retries--;
			break;
		case CHARGE_OOM_DIE: /* Killed by OOM Killer */
			css_put(&memcg->css);
			goto bypass;
		}
	} while (ret != CHARGE_OK);

	if (batch > nr_pages)
		refill_stock(memcg, batch - nr_pages);
	css_put(&memcg->css);
done:
	*ptr = memcg;
	return 0;
nomem:
	*ptr = NULL;
	return -ENOMEM;
bypass:
	*ptr = root_mem_cgroup;
	return -EINTR;
}
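
/*
 * Illustrative caller pattern (editorial; mem_cgroup_charge_common() later in
 * this file does essentially this): -EINTR means the charge was bypassed to
 * root_mem_cgroup and should be treated as success by most callers.
 *
 *	struct mem_cgroup *memcg = NULL;
 *	int ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
 *	if (ret == -ENOMEM)
 *		return ret;
 *	__mem_cgroup_commit_charge(memcg, page, nr_pages, ctype, false);
 */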

/*
 * Sometimes we have to undo a charge we got by try_charge().
 * This function is for that: it does the uncharge and puts the css refcnt
 * obtained by try_charge().
 */
static void __mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
				       unsigned int nr_pages)
{
	if (!mem_cgroup_is_root(memcg)) {
		unsigned long bytes = nr_pages * PAGE_SIZE;

		res_counter_uncharge(&memcg->res, bytes);
		if (do_swap_account)
			res_counter_uncharge(&memcg->memsw, bytes);
	}
}

/*
 * A helper function to get a mem_cgroup from an ID. Must be called under
 * rcu_read_lock(). The caller must check css_is_removed() or similar if that
 * is a concern. (dropping the refcnt from swap can be called against a
 * removed memcg.)
 */
static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
{
	struct cgroup_subsys_state *css;

	/* ID 0 is unused ID */
	if (!id)
		return NULL;
	css = css_lookup(&mem_cgroup_subsys, id);
	if (!css)
		return NULL;
	return container_of(css, struct mem_cgroup, css);
}

struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	struct mem_cgroup *memcg = NULL;
	struct page_cgroup *pc;
	unsigned short id;
	swp_entry_t ent;

	VM_BUG_ON(!PageLocked(page));

	pc = lookup_page_cgroup(page);
	lock_page_cgroup(pc);
	if (PageCgroupUsed(pc)) {
		memcg = pc->mem_cgroup;
		if (memcg && !css_tryget(&memcg->css))
			memcg = NULL;
	} else if (PageSwapCache(page)) {
		ent.val = page_private(page);
		id = lookup_swap_cgroup_id(ent);
		rcu_read_lock();
		memcg = mem_cgroup_lookup(id);
		if (memcg && !css_tryget(&memcg->css))
			memcg = NULL;
		rcu_read_unlock();
	}
	unlock_page_cgroup(pc);
	return memcg;
}

static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
				       struct page *page,
				       unsigned int nr_pages,
				       enum charge_type ctype,
				       bool lrucare)
{
	struct page_cgroup *pc = lookup_page_cgroup(page);
	struct zone *uninitialized_var(zone);
	bool was_on_lru = false;
	bool anon;

	lock_page_cgroup(pc);
	if (unlikely(PageCgroupUsed(pc))) {
		unlock_page_cgroup(pc);
		__mem_cgroup_cancel_charge(memcg, nr_pages);
		return;
	}
	/*
	 * we don't need page_cgroup_lock for tail pages, because they are not
	 * accessed by any other context at this point.
	 */

	/*
	 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
	 * may already be on some other mem_cgroup's LRU.  Take care of it.
	 */
	if (lrucare) {
		zone = page_zone(page);
		spin_lock_irq(&zone->lru_lock);
		if (PageLRU(page)) {
			ClearPageLRU(page);
			del_page_from_lru_list(zone, page, page_lru(page));
			was_on_lru = true;
		}
	}

	pc->mem_cgroup = memcg;
	/*
	 * We access a page_cgroup asynchronously without lock_page_cgroup().
	 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
	 * is accessed after testing the USED bit. To make pc->mem_cgroup
	 * visible before the USED bit, we need a memory barrier here.
	 * See mem_cgroup_add_lru_list(), etc.
	 */
	smp_wmb();
	SetPageCgroupUsed(pc);

	if (lrucare) {
		if (was_on_lru) {
			VM_BUG_ON(PageLRU(page));
			SetPageLRU(page);
			add_page_to_lru_list(zone, page, page_lru(page));
		}
		spin_unlock_irq(&zone->lru_lock);
	}

	if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
		anon = true;
	else
		anon = false;

	mem_cgroup_charge_statistics(memcg, anon, nr_pages);
	unlock_page_cgroup(pc);

	/*
	 * "charge_statistics" updated the event counter. Then, check it.
	 * Insert the ancestor (and the ancestor's ancestors) into the
	 * softlimit RB-tree if they exceed the softlimit.
	 */
	memcg_check_events(memcg, page);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define PCGF_NOCOPY_AT_SPLIT ((1 << PCG_LOCK) | (1 << PCG_MIGRATION))
/*
 * Because tail pages are not marked as "used", set it. We're under
 * zone->lru_lock, 'splitting on pmd' and compound_lock.
 * charge/uncharge will never happen and move_account() is done under
 * compound_lock(), so we don't have to take care of races.
 */
void mem_cgroup_split_huge_fixup(struct page *head)
{
	struct page_cgroup *head_pc = lookup_page_cgroup(head);
	struct page_cgroup *pc;
	int i;

	if (mem_cgroup_disabled())
		return;
	for (i = 1; i < HPAGE_PMD_NR; i++) {
		pc = head_pc + i;
		pc->mem_cgroup = head_pc->mem_cgroup;
		smp_wmb();/* see __commit_charge() */
		pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
	}
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/**
 * mem_cgroup_move_account - move account of the page
 * @page: the page
 * @nr_pages: number of regular pages (>1 for huge pages)
 * @pc:	page_cgroup of the page.
 * @from: mem_cgroup which the page is moved from.
 * @to:	mem_cgroup which the page is moved to. @from != @to.
 * @uncharge: whether we should call uncharge and css_put against @from.
 *
 * The caller must confirm the following.
 * - page is not on LRU (isolate_page() is useful.)
 * - compound_lock is held when nr_pages > 1
 *
 * This function doesn't do "charge" nor css_get to the new cgroup. It should
 * be done by the caller (__mem_cgroup_try_charge would be useful). If
 * @uncharge is true, this function does "uncharge" from the old cgroup, but
 * it doesn't if @uncharge is false, so the caller should do "uncharge".
 */
static int mem_cgroup_move_account(struct page *page,
				   unsigned int nr_pages,
				   struct page_cgroup *pc,
				   struct mem_cgroup *from,
				   struct mem_cgroup *to,
				   bool uncharge)
{
	unsigned long flags;
	int ret;
	bool anon = PageAnon(page);

	VM_BUG_ON(from == to);
	VM_BUG_ON(PageLRU(page));
	/*
	 * The page is isolated from LRU. So, the collapse function
	 * will not handle this page. But page splitting can happen.
	 * Do this check under compound_page_lock(). The caller should
	 * hold it.
	 */
	ret = -EBUSY;
	if (nr_pages > 1 && !PageTransHuge(page))
		goto out;

	lock_page_cgroup(pc);

	ret = -EINVAL;
	if (!PageCgroupUsed(pc) || pc->mem_cgroup != from)
		goto unlock;

	move_lock_mem_cgroup(from, &flags);

	if (!anon && page_mapped(page)) {
		/* Update mapped_file data for mem_cgroup */
		preempt_disable();
		__this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
		__this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
		preempt_enable();
	}
	mem_cgroup_charge_statistics(from, anon, -nr_pages);
	if (uncharge)
		/* This is not "cancel", but cancel_charge does all we need. */
		__mem_cgroup_cancel_charge(from, nr_pages);

	/* caller should have done css_get */
	pc->mem_cgroup = to;
	mem_cgroup_charge_statistics(to, anon, nr_pages);
	/*
	 * We charge against "to" which may not have any tasks. Then, "to"
	 * can be under rmdir(). But in the current implementation, the caller
	 * of this function is just force_empty() and move charge, so it's
	 * guaranteed that "to" is never removed. So, we don't check rmdir
	 * status here.
	 */
	move_unlock_mem_cgroup(from, &flags);
	ret = 0;
unlock:
	unlock_page_cgroup(pc);
	/*
	 * check events
	 */
	memcg_check_events(to, page);
	memcg_check_events(from, page);
out:
	return ret;
}
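
/*
 * Editorial usage sketch: mem_cgroup_move_parent() below shows the expected
 * calling sequence; the page is isolated from the LRU, the destination is
 * pre-charged, and compound_lock is taken for huge pages:
 *
 *	if (isolate_lru_page(page))
 *		goto put;
 *	ret = __mem_cgroup_try_charge(NULL, gfp_mask, nr_pages, &parent, false);
 *	if (nr_pages > 1)
 *		flags = compound_lock_irqsave(page);
 *	ret = mem_cgroup_move_account(page, nr_pages, pc, child, parent, true);
 */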

/*
 * move charges to its parent.
 */

static int mem_cgroup_move_parent(struct page *page,
				  struct page_cgroup *pc,
				  struct mem_cgroup *child,
				  gfp_t gfp_mask)
{
	struct cgroup *cg = child->css.cgroup;
	struct cgroup *pcg = cg->parent;
	struct mem_cgroup *parent;
	unsigned int nr_pages;
	unsigned long uninitialized_var(flags);
	int ret;

	/* Is ROOT ? */
	if (!pcg)
		return -EINVAL;

	ret = -EBUSY;
	if (!get_page_unless_zero(page))
		goto out;
	if (isolate_lru_page(page))
		goto put;

	nr_pages = hpage_nr_pages(page);

	parent = mem_cgroup_from_cont(pcg);
	ret = __mem_cgroup_try_charge(NULL, gfp_mask, nr_pages, &parent, false);
	if (ret)
		goto put_back;

	if (nr_pages > 1)
		flags = compound_lock_irqsave(page);

	ret = mem_cgroup_move_account(page, nr_pages, pc, child, parent, true);
	if (ret)
		__mem_cgroup_cancel_charge(parent, nr_pages);

	if (nr_pages > 1)
		compound_unlock_irqrestore(page, flags);
put_back:
	putback_lru_page(page);
put:
	put_page(page);
out:
	return ret;
}

/*
 * Charge the memory controller for page usage.
 * Return
 * 0 if the charge was successful
 * < 0 if the cgroup is over its limit
 */
static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask, enum charge_type ctype)
{
	struct mem_cgroup *memcg = NULL;
	unsigned int nr_pages = 1;
	bool oom = true;
	int ret;

	if (PageTransHuge(page)) {
		nr_pages <<= compound_order(page);
		VM_BUG_ON(!PageTransHuge(page));
		/*
		 * Never OOM-kill a process for a huge page.  The
		 * fault handler will fall back to regular pages.
		 */
		oom = false;
	}

	ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
	if (ret == -ENOMEM)
		return ret;
	__mem_cgroup_commit_charge(memcg, page, nr_pages, ctype, false);
	return 0;
}

int mem_cgroup_newpage_charge(struct page *page,
			      struct mm_struct *mm, gfp_t gfp_mask)
{
	if (mem_cgroup_disabled())
		return 0;
	VM_BUG_ON(page_mapped(page));
	VM_BUG_ON(page->mapping && !PageAnon(page));
	VM_BUG_ON(!mm);
	return mem_cgroup_charge_common(page, mm, gfp_mask,
					MEM_CGROUP_CHARGE_TYPE_MAPPED);
}

static void
__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
					enum charge_type ctype);

int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask)
{
	struct mem_cgroup *memcg = NULL;
	enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
	int ret;

	if (mem_cgroup_disabled())
		return 0;
	if (PageCompound(page))
		return 0;

	if (unlikely(!mm))
		mm = &init_mm;
	if (!page_is_file_cache(page))
		type = MEM_CGROUP_CHARGE_TYPE_SHMEM;

	if (!PageSwapCache(page))
		ret = mem_cgroup_charge_common(page, mm, gfp_mask, type);
	else { /* page is swapcache/shmem */
		ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &memcg);
		if (!ret)
			__mem_cgroup_commit_charge_swapin(page, memcg, type);
	}
	return ret;
}

/*
 * While swap-in, try_charge -> commit or cancel, the page is locked.
 * And when try_charge() successfully returns, one refcnt to memcg without
 * struct page_cgroup is acquired. This refcnt will be consumed by
 * "commit()" or removed by "cancel()".
 */
int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
				 struct page *page,
				 gfp_t mask, struct mem_cgroup **memcgp)
{
	struct mem_cgroup *memcg;
	int ret;

	*memcgp = NULL;

	if (mem_cgroup_disabled())
		return 0;

	if (!do_swap_account)
		goto charge_cur_mm;
	/*
	 * A racing thread's fault, or swapoff, may have already updated
	 * the pte, and even removed page from swap cache: in those cases
	 * do_swap_page()'s pte_same() test will fail; but there's also a
	 * KSM case which does need to charge the page.
	 */
	if (!PageSwapCache(page))
		goto charge_cur_mm;
	memcg = try_get_mem_cgroup_from_page(page);
	if (!memcg)
		goto charge_cur_mm;
	*memcgp = memcg;
	ret = __mem_cgroup_try_charge(NULL, mask, 1, memcgp, true);
	css_put(&memcg->css);
	if (ret == -EINTR)
		ret = 0;
	return ret;
charge_cur_mm:
	if (unlikely(!mm))
		mm = &init_mm;
	ret = __mem_cgroup_try_charge(mm, mask, 1, memcgp, true);
	if (ret == -EINTR)
		ret = 0;
	return ret;
}

static void
__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
					enum charge_type ctype)
{
	if (mem_cgroup_disabled())
		return;
	if (!memcg)
		return;
	cgroup_exclude_rmdir(&memcg->css);

	__mem_cgroup_commit_charge(memcg, page, 1, ctype, true);
	/*
	 * Now swap is on-memory. This means this page may be
	 * counted both as mem and swap....double count.
	 * Fix it by uncharging from memsw. Basically, this SwapCache is stable
	 * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page()
	 * may call delete_from_swap_cache() before we reach here.
	 */
	if (do_swap_account && PageSwapCache(page)) {
		swp_entry_t ent = {.val = page_private(page)};
		struct mem_cgroup *swap_memcg;
		unsigned short id;

		id = swap_cgroup_record(ent, 0);
		rcu_read_lock();
		swap_memcg = mem_cgroup_lookup(id);
		if (swap_memcg) {
			/*
			 * This recorded memcg can be an obsolete one. So,
			 * avoid calling css_tryget
			 */
			if (!mem_cgroup_is_root(swap_memcg))
				res_counter_uncharge(&swap_memcg->memsw,
						     PAGE_SIZE);
			mem_cgroup_swap_statistics(swap_memcg, false);
			mem_cgroup_put(swap_memcg);
		}
		rcu_read_unlock();
	}
	/*
	 * At swapin, we may charge account against a cgroup which has no tasks.
	 * So, rmdir()->pre_destroy() can be called while we do this charge.
	 * In that case, we need to call pre_destroy() again. check it here.
	 */
	cgroup_release_and_wakeup_rmdir(&memcg->css);
}

void mem_cgroup_commit_charge_swapin(struct page *page,
				     struct mem_cgroup *memcg)
{
	__mem_cgroup_commit_charge_swapin(page, memcg,
					  MEM_CGROUP_CHARGE_TYPE_MAPPED);
}

void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return;
	if (!memcg)
		return;
	__mem_cgroup_cancel_charge(memcg, 1);
}
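
/*
 * Editorial sketch of the swap-in charge protocol described above (the page
 * stays locked for the whole sequence; do_swap_page() in mm/memory.c is a
 * typical caller, shown here only as an assumed example):
 *
 *	struct mem_cgroup *memcg = NULL;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &memcg))
 *		goto oom;
 *	...				// map the page into the page tables
 *	mem_cgroup_commit_charge_swapin(page, memcg);
 *	...
 *	// on an error path instead:
 *	mem_cgroup_cancel_charge_swapin(memcg);
 */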

static void mem_cgroup_do_uncharge(struct mem_cgroup *memcg,
				   unsigned int nr_pages,
				   const enum charge_type ctype)
{
	struct memcg_batch_info *batch = NULL;
	bool uncharge_memsw = true;

	/* If swapout, usage of swap doesn't decrease */
	if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
		uncharge_memsw = false;

	batch = &current->memcg_batch;
	/*
	 * Usually, we do css_get() when we remember a memcg pointer.
	 * But in this case, we keep res->usage until the end of a series of
	 * uncharges. Then, it's ok to ignore memcg's refcnt.
	 */
	if (!batch->memcg)
		batch->memcg = memcg;
	/*
	 * do_batch > 0 when unmapping pages or inode invalidate/truncate.
	 * In those cases, all pages freed continuously can be expected to be in
	 * the same cgroup and we have a chance to coalesce uncharges.
	 * But we do uncharge one by one if this is killed by OOM(TIF_MEMDIE)
	 * because we want to do uncharge as soon as possible.
	 */

	if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
		goto direct_uncharge;

	if (nr_pages > 1)
		goto direct_uncharge;

	/*
	 * In the typical case, batch->memcg == mem. This means we can
	 * merge a series of uncharges into one uncharge of res_counter.
	 * If not, we uncharge res_counter one by one.
	 */
	if (batch->memcg != memcg)
		goto direct_uncharge;
	/* remember freed charge and uncharge it later */
	batch->nr_pages++;
	if (uncharge_memsw)
		batch->memsw_nr_pages++;
	return;
direct_uncharge:
	res_counter_uncharge(&memcg->res, nr_pages * PAGE_SIZE);
	if (uncharge_memsw)
		res_counter_uncharge(&memcg->memsw, nr_pages * PAGE_SIZE);
	if (unlikely(batch->memcg != memcg))
		memcg_oom_recover(memcg);
}

/*
 * uncharge if !page_mapped(page)
 */
static struct mem_cgroup *
__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
{
	struct mem_cgroup *memcg = NULL;
	unsigned int nr_pages = 1;
	struct page_cgroup *pc;
	bool anon;

	if (mem_cgroup_disabled())
		return NULL;

	if (PageSwapCache(page))
		return NULL;

	if (PageTransHuge(page)) {
		nr_pages <<= compound_order(page);
		VM_BUG_ON(!PageTransHuge(page));
	}
	/*
	 * Check if our page_cgroup is valid
	 */
	pc = lookup_page_cgroup(page);
	if (unlikely(!PageCgroupUsed(pc)))
		return NULL;

	lock_page_cgroup(pc);

	memcg = pc->mem_cgroup;

	if (!PageCgroupUsed(pc))
		goto unlock_out;

	anon = PageAnon(page);

	switch (ctype) {
	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
		/*
		 * Generally PageAnon tells if it's the anon statistics to be
		 * updated; but sometimes e.g. mem_cgroup_uncharge_page() is
		 * used before page reached the stage of being marked PageAnon.
		 */
		anon = true;
		/* fallthrough */
	case MEM_CGROUP_CHARGE_TYPE_DROP:
		/* See mem_cgroup_prepare_migration() */
		if (page_mapped(page) || PageCgroupMigration(pc))
			goto unlock_out;
		break;
	case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
		if (!PageAnon(page)) {	/* Shared memory */
			if (page->mapping && !page_is_file_cache(page))
				goto unlock_out;
		} else if (page_mapped(page)) /* Anon */
				goto unlock_out;
		break;
	default:
		break;
	}

	mem_cgroup_charge_statistics(memcg, anon, -nr_pages);

	ClearPageCgroupUsed(pc);
	/*
	 * pc->mem_cgroup is not cleared here. It will be accessed when it's
	 * freed from LRU. This is safe because an uncharged page is expected
	 * not to be reused (freed soon). The exception is SwapCache, which is
	 * handled by special functions.
	 */

	unlock_page_cgroup(pc);
	/*
	 * even after unlock, we have memcg->res.usage here and this memcg
	 * will never be freed.
	 */
	memcg_check_events(memcg, page);
	if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
		mem_cgroup_swap_statistics(memcg, true);
		mem_cgroup_get(memcg);
	}
	if (!mem_cgroup_is_root(memcg))
		mem_cgroup_do_uncharge(memcg, nr_pages, ctype);

	return memcg;

unlock_out:
	unlock_page_cgroup(pc);
	return NULL;
}

void mem_cgroup_uncharge_page(struct page *page)
{
	/* early check. */
	if (page_mapped(page))
		return;
	VM_BUG_ON(page->mapping && !PageAnon(page));
	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
}

void mem_cgroup_uncharge_cache_page(struct page *page)
{
	VM_BUG_ON(page_mapped(page));
	VM_BUG_ON(page->mapping);
	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
}

/*
 * Batch_start/batch_end is called in unmap_page_range/invalidate/truncate.
 * In those cases, pages are freed continuously and we can expect pages
 * are in the same memcg. Each of these calls itself limits the number of
 * pages freed at once, so uncharge_start/end() is called properly.
 * This may be called multiple (typically 2) times in a context.
 */

void mem_cgroup_uncharge_start(void)
{
	current->memcg_batch.do_batch++;
	/* We can do nest. */
	if (current->memcg_batch.do_batch == 1) {
		current->memcg_batch.memcg = NULL;
		current->memcg_batch.nr_pages = 0;
		current->memcg_batch.memsw_nr_pages = 0;
	}
}

void mem_cgroup_uncharge_end(void)
{
	struct memcg_batch_info *batch = &current->memcg_batch;

	if (!batch->do_batch)
		return;

	batch->do_batch--;
	if (batch->do_batch) /* If stacked, do nothing. */
		return;

	if (!batch->memcg)
		return;
	/*
	 * This "batch->memcg" is valid without any css_get/put etc...
	 * because we hide charges behind us.
	 */
	if (batch->nr_pages)
		res_counter_uncharge(&batch->memcg->res,
				     batch->nr_pages * PAGE_SIZE);
	if (batch->memsw_nr_pages)
		res_counter_uncharge(&batch->memcg->memsw,
				     batch->memsw_nr_pages * PAGE_SIZE);
	memcg_oom_recover(batch->memcg);
	/* forget this pointer (for sanity check) */
	batch->memcg = NULL;
}
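
/*
 * Editorial usage sketch of the batching described above: bulk unmap and
 * truncate paths bracket their page frees so that all uncharges against the
 * same memcg collapse into one res_counter operation, roughly:
 *
 *	mem_cgroup_uncharge_start();
 *	for each page being freed:
 *		mem_cgroup_uncharge_page(page);	// or _cache_page()
 *	mem_cgroup_uncharge_end();
 */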

#ifdef CONFIG_SWAP
/*
 * called after __delete_from_swap_cache() and drop "page" account.
 * memcg information is recorded to swap_cgroup of "ent"
 */
void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
{
	struct mem_cgroup *memcg;
	int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;

	if (!swapout) /* this was a swap cache but the swap is unused ! */
		ctype = MEM_CGROUP_CHARGE_TYPE_DROP;

	memcg = __mem_cgroup_uncharge_common(page, ctype);

	/*
	 * record memcg information,  if swapout && memcg != NULL,
	 * mem_cgroup_get() was called in uncharge().
	 */
	if (do_swap_account && swapout && memcg)
		swap_cgroup_record(ent, css_id(&memcg->css));
}
#endif

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
/*
 * called from swap_entry_free(). remove record in swap_cgroup and
 * uncharge "memsw" account.
 */
void mem_cgroup_uncharge_swap(swp_entry_t ent)
{
	struct mem_cgroup *memcg;
	unsigned short id;

	if (!do_swap_account)
		return;

	id = swap_cgroup_record(ent, 0);
	rcu_read_lock();
	memcg = mem_cgroup_lookup(id);
	if (memcg) {
		/*
		 * We uncharge this because swap is freed.
		 * This memcg can be obsolete one. We avoid calling css_tryget
		 */
		if (!mem_cgroup_is_root(memcg))
			res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
		mem_cgroup_swap_statistics(memcg, false);
		mem_cgroup_put(memcg);
	}
	rcu_read_unlock();
}

/**
 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
 * @entry: swap entry to be moved
 * @from:  mem_cgroup which the entry is moved from
 * @to:  mem_cgroup which the entry is moved to
 * @need_fixup: whether we should fixup res_counters and refcounts.
 *
 * It succeeds only when the swap_cgroup's record for this entry is the same
 * as the mem_cgroup's id of @from.
 *
 * Returns 0 on success, -EINVAL on failure.
 *
 * The caller must have charged to @to, IOW, called res_counter_charge() about
 * both res and memsw, and called css_get().
 */
static int mem_cgroup_move_swap_account(swp_entry_t entry,
		struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
{
	unsigned short old_id, new_id;

	old_id = css_id(&from->css);
	new_id = css_id(&to->css);

	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
		mem_cgroup_swap_statistics(from, false);
		mem_cgroup_swap_statistics(to, true);
		/*
		 * This function is only called from task migration context now.
		 * It postpones res_counter and refcount handling till the end
		 * of task migration(mem_cgroup_clear_mc()) for performance
		 * improvement. But we cannot postpone mem_cgroup_get(to)
		 * because if the process that has been moved to @to does
		 * swap-in, the refcount of @to might be decreased to 0.
		 */
		mem_cgroup_get(to);
		if (need_fixup) {
			if (!mem_cgroup_is_root(from))
				res_counter_uncharge(&from->memsw, PAGE_SIZE);
			mem_cgroup_put(from);
			/*
			 * we charged both to->res and to->memsw, so we should
			 * uncharge to->res.
			 */
			if (!mem_cgroup_is_root(to))
				res_counter_uncharge(&to->res, PAGE_SIZE);
		}
		return 0;
	}
	return -EINVAL;
}
#else
static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
		struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
{
	return -EINVAL;
}
#endif

/*
 * Before starting migration, account PAGE_SIZE to the mem_cgroup that the old
 * page belongs to.
 */
int mem_cgroup_prepare_migration(struct page *page,
	struct page *newpage, struct mem_cgroup **memcgp, gfp_t gfp_mask)
{
	struct mem_cgroup *memcg = NULL;
	struct page_cgroup *pc;
	enum charge_type ctype;
	int ret = 0;

	*memcgp = NULL;

	VM_BUG_ON(PageTransHuge(page));
	if (mem_cgroup_disabled())
		return 0;

	pc = lookup_page_cgroup(page);
	lock_page_cgroup(pc);
	if (PageCgroupUsed(pc)) {
		memcg = pc->mem_cgroup;
		css_get(&memcg->css);
		/*
		 * At migrating an anonymous page, its mapcount goes down
		 * to 0 and uncharge() will be called. But, even if it's fully
		 * unmapped, migration may fail and this page has to be
		 * charged again. We set the MIGRATION flag here and delay
		 * uncharge until end_migration() is called.
		 *
		 * Corner Case Thinking
		 * A)
		 * When the old page was mapped as Anon and it's unmap-and-freed
		 * while migration was ongoing.
		 * If unmap finds the old page, uncharge() of it will be delayed
		 * until end_migration(). If unmap finds a new page, it's
		 * uncharged when its mapcount goes from 1 to 0. If the unmap
		 * code finds a swap_migration_entry, the new page will not be
		 * mapped and end_migration() will find it (mapcount==0).
		 *
		 * B)
		 * When the old page was mapped but migration fails, the kernel
		 * remaps it. A charge for it is kept by the MIGRATION flag even
		 * if mapcount goes down to 0. We can do the remap successfully
		 * without charging it again.
		 *
		 * C)
		 * The "old" page is under lock_page() until the end of
		 * migration, so, the old page itself will not be swapped-out.
		 * If the new page is swapped out before end_migration, our
		 * hook to the usual swap-out path will catch the event.
		 */
		if (PageAnon(page))
			SetPageCgroupMigration(pc);
	}
	unlock_page_cgroup(pc);
	/*
	 * If the page is not charged at this point,
	 * we return here.
	 */
	if (!memcg)
		return 0;

	*memcgp = memcg;
	ret = __mem_cgroup_try_charge(NULL, gfp_mask, 1, memcgp, false);
	css_put(&memcg->css);/* drop extra refcnt */
	if (ret) {
		if (PageAnon(page)) {
			lock_page_cgroup(pc);
			ClearPageCgroupMigration(pc);
			unlock_page_cgroup(pc);
			/*
			 * The old page may be fully unmapped while we kept it.
			 */
			mem_cgroup_uncharge_page(page);
		}
		/* we'll need to revisit this error code (we have -EINTR) */
		return -ENOMEM;
	}
	/*
	 * We charge the new page before it's used/mapped. So, even if
	 * unlock_page() is called before end_migration, we can catch all
	 * events on this new page. In the case the new page is migrated but
	 * not remapped, the new page's mapcount will finally be 0 and we call
	 * uncharge in end_migration().
	 */
	if (PageAnon(page))
		ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
	else if (page_is_file_cache(page))
		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
	else
		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
	__mem_cgroup_commit_charge(memcg, newpage, 1, ctype, false);
	return ret;
}

/* remove redundant charge if migration failed */
void mem_cgroup_end_migration(struct mem_cgroup *memcg,
	struct page *oldpage, struct page *newpage, bool migration_ok)
{
	struct page *used, *unused;
	struct page_cgroup *pc;
	bool anon;

	if (!memcg)
		return;
	/* blocks rmdir() */
	cgroup_exclude_rmdir(&memcg->css);
	if (!migration_ok) {
		used = oldpage;
		unused = newpage;
	} else {
		used = newpage;
		unused = oldpage;
	}
	/*
	 * We disallowed uncharge of pages under migration because mapcount
	 * of the page goes down to zero, temporarily.
	 * Clear the flag and check whether the page should be charged.
	 */
	pc = lookup_page_cgroup(oldpage);
	lock_page_cgroup(pc);
	ClearPageCgroupMigration(pc);
	unlock_page_cgroup(pc);
	anon = PageAnon(used);
	__mem_cgroup_uncharge_common(unused,
		anon ? MEM_CGROUP_CHARGE_TYPE_MAPPED
		     : MEM_CGROUP_CHARGE_TYPE_CACHE);

	/*
	 * If a page is a file cache, radix-tree replacement is very atomic
	 * and we can skip this check. When it was an Anon page, its mapcount
	 * goes down to 0. But because we added the MIGRATION flag, it's not
	 * uncharged yet. There are several cases but the page->mapcount check
	 * and the USED bit check in mem_cgroup_uncharge_page() will do enough
	 * checking. (see prepare_charge() also)
	 */
	if (anon)
		mem_cgroup_uncharge_page(used);
	/*
	 * At migration, we may charge account against a cgroup which has no
	 * tasks.
	 * So, rmdir()->pre_destroy() can be called while we do this charge.
	 * In that case, we need to call pre_destroy() again. check it here.
	 */
	cgroup_release_and_wakeup_rmdir(&memcg->css);
}
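
/*
 * Editorial pairing sketch (assumed caller, not taken from this file): the
 * migration core in mm/migrate.c is expected to bracket the actual page copy
 * with these two helpers, roughly:
 *
 *	struct mem_cgroup *memcg = NULL;
 *
 *	ret = mem_cgroup_prepare_migration(page, newpage, &memcg, GFP_KERNEL);
 *	if (ret)
 *		goto out;	// charge failed, don't migrate
 *	...			// move page contents and mappings
 *	mem_cgroup_end_migration(memcg, page, newpage, migration_ok);
 */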

/*
 * At replace page cache, newpage is not under any memcg but it's on
 * LRU. So, this function doesn't touch res_counter but handles LRU
 * in correct way. Both pages are locked so we cannot race with uncharge.
 */
void mem_cgroup_replace_page_cache(struct page *oldpage,
				  struct page *newpage)
{
	struct mem_cgroup *memcg;
	struct page_cgroup *pc;
	enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;

	if (mem_cgroup_disabled())
		return;

	pc = lookup_page_cgroup(oldpage);
	/* fix accounting on old pages */
	lock_page_cgroup(pc);
	memcg = pc->mem_cgroup;
	mem_cgroup_charge_statistics(memcg, false, -1);
	ClearPageCgroupUsed(pc);
	unlock_page_cgroup(pc);

	if (PageSwapBacked(oldpage))
		type = MEM_CGROUP_CHARGE_TYPE_SHMEM;

	/*
	 * Even if newpage->mapping was NULL before starting replacement,
	 * the newpage may be on LRU(or pagevec for LRU) already. We lock
	 * LRU while we overwrite pc->mem_cgroup.
	 */
	__mem_cgroup_commit_charge(memcg, newpage, 1, type, true);
}

#ifdef CONFIG_DEBUG_VM
static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
{
	struct page_cgroup *pc;

	pc = lookup_page_cgroup(page);
	/*
	 * Can be NULL while feeding pages into the page allocator for
	 * the first time, i.e. during boot or memory hotplug;
	 * or when mem_cgroup_disabled().
	 */
	if (likely(pc) && PageCgroupUsed(pc))
		return pc;
	return NULL;
}

bool mem_cgroup_bad_page_check(struct page *page)
{
	if (mem_cgroup_disabled())
		return false;

	return lookup_page_cgroup_used(page) != NULL;
}

void mem_cgroup_print_bad_page(struct page *page)
{
	struct page_cgroup *pc;

	pc = lookup_page_cgroup_used(page);
	if (pc) {
		printk(KERN_ALERT "pc:%p pc->flags:%lx pc->mem_cgroup:%p\n",
		       pc, pc->flags, pc->mem_cgroup);
	}
}
#endif

3428 3429
static DEFINE_MUTEX(set_limit_mutex);

static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
				unsigned long long val)
{
	int retry_count;
	u64 memswlimit, memlimit;
	int ret = 0;
	int children = mem_cgroup_count_children(memcg);
	u64 curusage, oldusage;
	int enlarge;

	/*
	 * To keep hierarchical reclaim simple, how long we should retry
	 * depends on the caller. We set our retry count to be a function
	 * of the number of children we should visit in this loop.
	 */
	retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;

	oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);

	enlarge = 0;
	while (retry_count) {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		/*
		 * Rather than hiding all this in some function, do it open
		 * coded so that it is obvious what really happens here.
		 * We have to guarantee memcg->res.limit <= memcg->memsw.limit.
		 */
		mutex_lock(&set_limit_mutex);
		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
		if (memswlimit < val) {
			ret = -EINVAL;
			mutex_unlock(&set_limit_mutex);
			break;
		}

		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
		if (memlimit < val)
			enlarge = 1;

		ret = res_counter_set_limit(&memcg->res, val);
		if (!ret) {
			if (memswlimit == val)
				memcg->memsw_is_minimum = true;
			else
				memcg->memsw_is_minimum = false;
		}
		mutex_unlock(&set_limit_mutex);

		if (!ret)
			break;

		mem_cgroup_reclaim(memcg, GFP_KERNEL,
				   MEM_CGROUP_RECLAIM_SHRINK);
		curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
		/* Usage is reduced ? */
		if (curusage >= oldusage)
			retry_count--;
		else
			oldusage = curusage;
	}
	if (!ret && enlarge)
		memcg_oom_recover(memcg);

	return ret;
}
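
/*
 * Example (illustrative): this resize path is reached when user space
 * writes to the per-cgroup control file, e.g.
 *
 *	echo 512M > /sys/fs/cgroup/memory/<group>/memory.limit_in_bytes
 *
 * which ends up in mem_cgroup_write() below with type == _MEM and
 * name == RES_LIMIT. The mount point shown is an assumption.
 */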

static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
					unsigned long long val)
{
	int retry_count;
	u64 memlimit, memswlimit, oldusage, curusage;
	int children = mem_cgroup_count_children(memcg);
	int ret = -EBUSY;
	int enlarge = 0;

	/* see mem_cgroup_resize_limit() */
	retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
	oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
	while (retry_count) {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		/*
		 * Rather than hiding all this in some function, do it open
		 * coded so that it is obvious what really happens here.
		 * We have to guarantee memcg->res.limit <= memcg->memsw.limit.
		 */
		mutex_lock(&set_limit_mutex);
		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
		if (memlimit > val) {
			ret = -EINVAL;
			mutex_unlock(&set_limit_mutex);
			break;
		}
		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
		if (memswlimit < val)
			enlarge = 1;
		ret = res_counter_set_limit(&memcg->memsw, val);
		if (!ret) {
			if (memlimit == val)
				memcg->memsw_is_minimum = true;
			else
				memcg->memsw_is_minimum = false;
		}
		mutex_unlock(&set_limit_mutex);

		if (!ret)
			break;

		mem_cgroup_reclaim(memcg, GFP_KERNEL,
				   MEM_CGROUP_RECLAIM_NOSWAP |
				   MEM_CGROUP_RECLAIM_SHRINK);
		curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
		/* Usage is reduced ? */
		if (curusage >= oldusage)
			retry_count--;
		else
			oldusage = curusage;
	}
	if (!ret && enlarge)
		memcg_oom_recover(memcg);
	return ret;
}

unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	unsigned long nr_reclaimed = 0;
	struct mem_cgroup_per_zone *mz, *next_mz = NULL;
	unsigned long reclaimed;
	int loop = 0;
	struct mem_cgroup_tree_per_zone *mctz;
	unsigned long long excess;
	unsigned long nr_scanned;

	if (order > 0)
		return 0;

	mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
	/*
	 * This loop can run for a while, especially if mem_cgroups
	 * continuously keep exceeding their soft limit and putting the
	 * system under pressure.
	 */
	do {
		if (next_mz)
			mz = next_mz;
		else
			mz = mem_cgroup_largest_soft_limit_node(mctz);
		if (!mz)
			break;

		nr_scanned = 0;
		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
						    gfp_mask, &nr_scanned);
		nr_reclaimed += reclaimed;
		*total_scanned += nr_scanned;
		spin_lock(&mctz->lock);

		/*
		 * If we failed to reclaim anything from this memory cgroup
		 * it is time to move on to the next cgroup
		 */
		next_mz = NULL;
		if (!reclaimed) {
			do {
				/*
				 * Loop until we find yet another one.
				 *
				 * By the time we get the soft_limit lock
				 * again, someone might have added the
				 * group back on the RB tree. Iterate to
				 * make sure we get a different memcg.
				 * mem_cgroup_largest_soft_limit_node returns
				 * NULL if no other cgroup is present on
				 * the tree
				 */
				next_mz =
				__mem_cgroup_largest_soft_limit_node(mctz);
				if (next_mz == mz)
					css_put(&next_mz->memcg->css);
				else /* next_mz == NULL or other memcg */
					break;
			} while (1);
		}
		__mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
		excess = res_counter_soft_limit_excess(&mz->memcg->res);
		/*
		 * One school of thought says that we should not add
		 * back the node to the tree if reclaim returns 0.
		 * But our reclaim could return 0, simply because, due
		 * to priority, we are exposing a smaller subset of
		 * memory to reclaim from. Consider this as a longer
		 * term TODO.
		 */
		/* If excess == 0, no tree ops */
		__mem_cgroup_insert_exceeded(mz->memcg, mz, mctz, excess);
		spin_unlock(&mctz->lock);
		css_put(&mz->memcg->css);
		loop++;
		/*
		 * Could not reclaim anything and there are no more
		 * mem cgroups to try or we seem to be looping without
		 * reclaiming anything.
		 */
		if (!nr_reclaimed &&
			(next_mz == NULL ||
			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
			break;
	} while (!nr_reclaimed);
	if (next_mz)
		css_put(&next_mz->memcg->css);
	return nr_reclaimed;
}
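
/*
 * Note (illustrative): soft limit reclaim is driven by global memory
 * pressure. In kernels of this era it is typically called from kswapd's
 * balance_pgdat() and from the direct reclaim path in mm/vmscan.c, which
 * pass in the zone under pressure and collect *total_scanned.
 */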

/*
 * This routine traverses the page_cgroups on the given LRU list and drops
 * them all. *And* this routine doesn't reclaim the pages themselves, it
 * just removes the page_cgroup accounting.
 */
static int mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
				int node, int zid, enum lru_list lru)
{
	struct mem_cgroup_per_zone *mz;
	unsigned long flags, loop;
	struct list_head *list;
	struct page *busy;
	struct zone *zone;
	int ret = 0;

	zone = &NODE_DATA(node)->node_zones[zid];
	mz = mem_cgroup_zoneinfo(memcg, node, zid);
	list = &mz->lruvec.lists[lru];

	loop = mz->lru_size[lru];
	/* give some margin against EBUSY etc...*/
	loop += 256;
	busy = NULL;
	while (loop--) {
		struct page_cgroup *pc;
		struct page *page;

		ret = 0;
		spin_lock_irqsave(&zone->lru_lock, flags);
		if (list_empty(list)) {
			spin_unlock_irqrestore(&zone->lru_lock, flags);
			break;
		}
		page = list_entry(list->prev, struct page, lru);
		if (busy == page) {
			list_move(&page->lru, list);
			busy = NULL;
			spin_unlock_irqrestore(&zone->lru_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&zone->lru_lock, flags);

		pc = lookup_page_cgroup(page);

		ret = mem_cgroup_move_parent(page, pc, memcg, GFP_KERNEL);
		if (ret == -ENOMEM || ret == -EINTR)
			break;

		if (ret == -EBUSY || ret == -EINVAL) {
			/* found lock contention or "pc" is obsolete. */
			busy = page;
			cond_resched();
		} else
			busy = NULL;
	}

	if (!ret && !list_empty(list))
		return -EBUSY;
	return ret;
}

/*
 * Make the mem_cgroup's charge 0 if there are no tasks.
 * This makes it possible to delete this mem_cgroup.
 */
static int mem_cgroup_force_empty(struct mem_cgroup *memcg, bool free_all)
{
	int ret;
	int node, zid, shrink;
	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct cgroup *cgrp = memcg->css.cgroup;

	css_get(&memcg->css);

	shrink = 0;
	/* should free all ? */
	if (free_all)
		goto try_to_free;
move_account:
	do {
		ret = -EBUSY;
		if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
			goto out;
		ret = -EINTR;
		if (signal_pending(current))
			goto out;
		/* This makes sure all *used* pages are on an LRU. */
		lru_add_drain_all();
		drain_all_stock_sync(memcg);
		ret = 0;
		mem_cgroup_start_move(memcg);
		for_each_node_state(node, N_HIGH_MEMORY) {
			for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
				enum lru_list lru;
				for_each_lru(lru) {
					ret = mem_cgroup_force_empty_list(memcg,
							node, zid, lru);
					if (ret)
						break;
				}
			}
			if (ret)
				break;
		}
		mem_cgroup_end_move(memcg);
		memcg_oom_recover(memcg);
		/* it seems the parent cgroup doesn't have enough mem */
		if (ret == -ENOMEM)
			goto try_to_free;
		cond_resched();
	/* "ret" should also be checked to ensure all lists are empty. */
	} while (res_counter_read_u64(&memcg->res, RES_USAGE) > 0 || ret);
out:
	css_put(&memcg->css);
	return ret;

try_to_free:
	/* returns EBUSY if there is a task or if we come here twice. */
	if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
		ret = -EBUSY;
		goto out;
	}
	/* we call try-to-free pages to make this cgroup empty */
	lru_add_drain_all();
	/* try to free all pages in this cgroup */
	shrink = 1;
	while (nr_retries && res_counter_read_u64(&memcg->res, RES_USAGE) > 0) {
		int progress;

		if (signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL,
						false);
		if (!progress) {
			nr_retries--;
			/* maybe some writeback is necessary */
			congestion_wait(BLK_RW_ASYNC, HZ/10);
		}

	}
	lru_add_drain();
	/* try move_account...there may be some *locked* pages. */
	goto move_account;
}

int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
{
	return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
}
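
/*
 * Example (illustrative): user space triggers the above via
 *
 *	echo 0 > /sys/fs/cgroup/memory/<group>/memory.force_empty
 *
 * typically just before rmdir(), so that remaining charges are reclaimed
 * or moved to the parent instead of pinning the about-to-be-removed group.
 * The mount point shown is an assumption.
 */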


static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
{
	return mem_cgroup_from_cont(cont)->use_hierarchy;
}

static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
					u64 val)
{
	int retval = 0;
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
	struct cgroup *parent = cont->parent;
	struct mem_cgroup *parent_memcg = NULL;

	if (parent)
		parent_memcg = mem_cgroup_from_cont(parent);

	cgroup_lock();
	/*
	 * If the parent's use_hierarchy is set, we can't make any
	 * modifications in the child subtrees. If it is unset, then the
	 * change can occur, provided the current cgroup has no children.
	 *
	 * For the root cgroup, parent_memcg is NULL; we allow the value to
	 * be set if there are no children.
	 */
	if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
				(val == 1 || val == 0)) {
		if (list_empty(&cont->children))
			memcg->use_hierarchy = val;
		else
			retval = -EBUSY;
	} else
		retval = -EINVAL;
	cgroup_unlock();

	return retval;
}
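
/*
 * Example (illustrative): hierarchical accounting has to be switched on
 * before the cgroup gets children, e.g.
 *
 *	mkdir /sys/fs/cgroup/memory/parent
 *	echo 1 > /sys/fs/cgroup/memory/parent/memory.use_hierarchy
 *	mkdir /sys/fs/cgroup/memory/parent/child
 *
 * Doing the mkdir of the child first makes the write fail with -EBUSY, as
 * enforced above. The mount point shown is an assumption.
 */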

static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg,
					       enum mem_cgroup_stat_index idx)
{
	struct mem_cgroup *iter;
	long val = 0;

	/* Per-cpu values can be negative, use a signed accumulator */
	for_each_mem_cgroup_tree(iter, memcg)
		val += mem_cgroup_read_stat(iter, idx);

	if (val < 0) /* race ? */
		val = 0;
	return val;
}

static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
{
	u64 val;

	if (!mem_cgroup_is_root(memcg)) {
		if (!swap)
			return res_counter_read_u64(&memcg->res, RES_USAGE);
		else
			return res_counter_read_u64(&memcg->memsw, RES_USAGE);
	}

	val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE);
	val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS);

	if (swap)
		val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAPOUT);

	return val << PAGE_SHIFT;
}

static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
	u64 val;
	int type, name;

	type = MEMFILE_TYPE(cft->private);
	name = MEMFILE_ATTR(cft->private);
	switch (type) {
	case _MEM:
		if (name == RES_USAGE)
			val = mem_cgroup_usage(memcg, false);
		else
			val = res_counter_read_u64(&memcg->res, name);
		break;
	case _MEMSWAP:
		if (name == RES_USAGE)
			val = mem_cgroup_usage(memcg, true);
		else
			val = res_counter_read_u64(&memcg->memsw, name);
		break;
	default:
		BUG();
	}
	return val;
}
/*
 * The main user of this function is the write handler for RES_LIMIT.
 */
static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
			    const char *buffer)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
	int type, name;
	unsigned long long val;
	int ret;

	type = MEMFILE_TYPE(cft->private);
	name = MEMFILE_ATTR(cft->private);
	switch (name) {
	case RES_LIMIT:
		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
			ret = -EINVAL;
			break;
		}
		/* This function does all the necessary parsing...reuse it */
		ret = res_counter_memparse_write_strategy(buffer, &val);
		if (ret)
			break;
		if (type == _MEM)
			ret = mem_cgroup_resize_limit(memcg, val);
		else
			ret = mem_cgroup_resize_memsw_limit(memcg, val);
		break;
	case RES_SOFT_LIMIT:
		ret = res_counter_memparse_write_strategy(buffer, &val);
		if (ret)
			break;
		/*
		 * For memsw, soft limits are hard to implement in terms
		 * of semantics; for now, we only support soft limits for
		 * memory control without swap.
		 */
		if (type == _MEM)
			ret = res_counter_set_soft_limit(&memcg->res, val);
		else
			ret = -EINVAL;
		break;
	default:
		ret = -EINVAL; /* should be BUG() ? */
		break;
	}
	return ret;
}

static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
		unsigned long long *mem_limit, unsigned long long *memsw_limit)
{
	struct cgroup *cgroup;
	unsigned long long min_limit, min_memsw_limit, tmp;

	min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
	min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
	cgroup = memcg->css.cgroup;
	if (!memcg->use_hierarchy)
		goto out;

	while (cgroup->parent) {
		cgroup = cgroup->parent;
		memcg = mem_cgroup_from_cont(cgroup);
		if (!memcg->use_hierarchy)
			break;
		tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
		min_limit = min(min_limit, tmp);
		tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
		min_memsw_limit = min(min_memsw_limit, tmp);
	}
out:
	*mem_limit = min_limit;
	*memsw_limit = min_memsw_limit;
}
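
/*
 * Worked example (illustrative): with use_hierarchy enabled, if the parent
 * has memory.limit_in_bytes = 1G and this child has 2G, the value reported
 * as "hierarchical_memory_limit" in memory.stat is 1G, i.e. the minimum
 * limit found while walking up the hierarchy-enabled ancestors above.
 */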

static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
{
	struct mem_cgroup *memcg;
	int type, name;

	memcg = mem_cgroup_from_cont(cont);
	type = MEMFILE_TYPE(event);
	name = MEMFILE_ATTR(event);
	switch (name) {
	case RES_MAX_USAGE:
		if (type == _MEM)
			res_counter_reset_max(&memcg->res);
		else
			res_counter_reset_max(&memcg->memsw);
		break;
	case RES_FAILCNT:
		if (type == _MEM)
			res_counter_reset_failcnt(&memcg->res);
		else
			res_counter_reset_failcnt(&memcg->memsw);
		break;
	}

	return 0;
}

static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
					struct cftype *cft)
{
	return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
}

#ifdef CONFIG_MMU
static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
					struct cftype *cft, u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);

	if (val >= (1 << NR_MOVE_TYPE))
		return -EINVAL;
	/*
	 * We check this value several times both in can_attach() and
	 * attach(), so we need cgroup lock to prevent this value from being
	 * inconsistent.
	 */
	cgroup_lock();
	memcg->move_charge_at_immigrate = val;
	cgroup_unlock();

	return 0;
}
#else
static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
					struct cftype *cft, u64 val)
{
	return -ENOSYS;
}
#endif
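
/*
 * Example (illustrative): charge moving at task migration is enabled by
 * writing a bitmask of move types to the destination group, e.g.
 *
 *	echo 3 > /sys/fs/cgroup/memory/<group>/memory.move_charge_at_immigrate
 *
 * In kernels of this era the bits select anonymous and file pages; the
 * mount point shown is an assumption.
 */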


/* For read statistics */
enum {
	MCS_CACHE,
	MCS_RSS,
4043
	MCS_FILE_MAPPED,
K
KAMEZAWA Hiroyuki 已提交
4044 4045
	MCS_PGPGIN,
	MCS_PGPGOUT,
4046
	MCS_SWAP,
4047 4048
	MCS_PGFAULT,
	MCS_PGMAJFAULT,
K
KAMEZAWA Hiroyuki 已提交
4049 4050 4051 4052 4053 4054 4055 4056 4057 4058
	MCS_INACTIVE_ANON,
	MCS_ACTIVE_ANON,
	MCS_INACTIVE_FILE,
	MCS_ACTIVE_FILE,
	MCS_UNEVICTABLE,
	NR_MCS_STAT,
};

struct mcs_total_stat {
	s64 stat[NR_MCS_STAT];
4059 4060
};

K
KAMEZAWA Hiroyuki 已提交
4061 4062 4063 4064 4065 4066
struct {
	char *local_name;
	char *total_name;
} memcg_stat_strings[NR_MCS_STAT] = {
	{"cache", "total_cache"},
	{"rss", "total_rss"},
4067
	{"mapped_file", "total_mapped_file"},
K
KAMEZAWA Hiroyuki 已提交
4068 4069
	{"pgpgin", "total_pgpgin"},
	{"pgpgout", "total_pgpgout"},
4070
	{"swap", "total_swap"},
4071 4072
	{"pgfault", "total_pgfault"},
	{"pgmajfault", "total_pgmajfault"},
K
KAMEZAWA Hiroyuki 已提交
4073 4074 4075 4076 4077 4078 4079 4080
	{"inactive_anon", "total_inactive_anon"},
	{"active_anon", "total_active_anon"},
	{"inactive_file", "total_inactive_file"},
	{"active_file", "total_active_file"},
	{"unevictable", "total_unevictable"}
};


K
KAMEZAWA Hiroyuki 已提交
4081
static void
4082
mem_cgroup_get_local_stat(struct mem_cgroup *memcg, struct mcs_total_stat *s)
K
KAMEZAWA Hiroyuki 已提交
4083 4084 4085 4086
{
	s64 val;

	/* per cpu stat */
4087
	val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_CACHE);
K
KAMEZAWA Hiroyuki 已提交
4088
	s->stat[MCS_CACHE] += val * PAGE_SIZE;
4089
	val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_RSS);
K
KAMEZAWA Hiroyuki 已提交
4090
	s->stat[MCS_RSS] += val * PAGE_SIZE;
4091
	val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
4092
	s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
4093
	val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGPGIN);
K
KAMEZAWA Hiroyuki 已提交
4094
	s->stat[MCS_PGPGIN] += val;
4095
	val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGPGOUT);
K
KAMEZAWA Hiroyuki 已提交
4096
	s->stat[MCS_PGPGOUT] += val;
4097
	if (do_swap_account) {
4098
		val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_SWAPOUT);
4099 4100
		s->stat[MCS_SWAP] += val * PAGE_SIZE;
	}
4101
	val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGFAULT);
4102
	s->stat[MCS_PGFAULT] += val;
4103
	val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGMAJFAULT);
4104
	s->stat[MCS_PGMAJFAULT] += val;
K
KAMEZAWA Hiroyuki 已提交
4105 4106

	/* per zone stat */
4107
	val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_ANON));
K
KAMEZAWA Hiroyuki 已提交
4108
	s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
4109
	val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_ANON));
K
KAMEZAWA Hiroyuki 已提交
4110
	s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
4111
	val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_FILE));
K
KAMEZAWA Hiroyuki 已提交
4112
	s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
4113
	val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_FILE));
K
KAMEZAWA Hiroyuki 已提交
4114
	s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
4115
	val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_UNEVICTABLE));
K
KAMEZAWA Hiroyuki 已提交
4116 4117 4118 4119
	s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
}

static void
4120
mem_cgroup_get_total_stat(struct mem_cgroup *memcg, struct mcs_total_stat *s)
K
KAMEZAWA Hiroyuki 已提交
4121
{
K
KAMEZAWA Hiroyuki 已提交
4122 4123
	struct mem_cgroup *iter;

4124
	for_each_mem_cgroup_tree(iter, memcg)
K
KAMEZAWA Hiroyuki 已提交
4125
		mem_cgroup_get_local_stat(iter, s);
K
KAMEZAWA Hiroyuki 已提交
4126 4127
}

4128 4129 4130 4131 4132 4133 4134
#ifdef CONFIG_NUMA
static int mem_control_numa_stat_show(struct seq_file *m, void *arg)
{
	int nid;
	unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
	unsigned long node_nr;
	struct cgroup *cont = m->private;
4135
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
4136

4137
	total_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL);
4138 4139
	seq_printf(m, "total=%lu", total_nr);
	for_each_node_state(nid, N_HIGH_MEMORY) {
4140
		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL);
4141 4142 4143 4144
		seq_printf(m, " N%d=%lu", nid, node_nr);
	}
	seq_putc(m, '\n');

4145
	file_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_FILE);
4146 4147
	seq_printf(m, "file=%lu", file_nr);
	for_each_node_state(nid, N_HIGH_MEMORY) {
4148
		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
4149
				LRU_ALL_FILE);
4150 4151 4152 4153
		seq_printf(m, " N%d=%lu", nid, node_nr);
	}
	seq_putc(m, '\n');

4154
	anon_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_ANON);
4155 4156
	seq_printf(m, "anon=%lu", anon_nr);
	for_each_node_state(nid, N_HIGH_MEMORY) {
4157
		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
4158
				LRU_ALL_ANON);
4159 4160 4161 4162
		seq_printf(m, " N%d=%lu", nid, node_nr);
	}
	seq_putc(m, '\n');

4163
	unevictable_nr = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_UNEVICTABLE));
4164 4165
	seq_printf(m, "unevictable=%lu", unevictable_nr);
	for_each_node_state(nid, N_HIGH_MEMORY) {
4166
		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
4167
				BIT(LRU_UNEVICTABLE));
4168 4169 4170 4171 4172 4173 4174
		seq_printf(m, " N%d=%lu", nid, node_nr);
	}
	seq_putc(m, '\n');
	return 0;
}
#endif /* CONFIG_NUMA */
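
/*
 * Example output of memory.numa_stat (illustrative, two-node machine),
 * as printed by mem_control_numa_stat_show() above; values are page counts:
 *
 *	total=1234 N0=800 N1=434
 *	file=1000 N0=700 N1=300
 *	anon=200 N0=80 N1=120
 *	unevictable=34 N0=20 N1=14
 */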

static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
				 struct cgroup_map_cb *cb)
4177
{
4178
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
K
KAMEZAWA Hiroyuki 已提交
4179
	struct mcs_total_stat mystat;
4180 4181
	int i;

K
KAMEZAWA Hiroyuki 已提交
4182
	memset(&mystat, 0, sizeof(mystat));
4183
	mem_cgroup_get_local_stat(memcg, &mystat);
4184

4185

4186 4187 4188
	for (i = 0; i < NR_MCS_STAT; i++) {
		if (i == MCS_SWAP && !do_swap_account)
			continue;
K
KAMEZAWA Hiroyuki 已提交
4189
		cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
4190
	}
L
Lee Schermerhorn 已提交
4191

K
KAMEZAWA Hiroyuki 已提交
4192
	/* Hierarchical information */
4193 4194
	{
		unsigned long long limit, memsw_limit;
4195
		memcg_get_hierarchical_limit(memcg, &limit, &memsw_limit);
4196 4197 4198 4199
		cb->fill(cb, "hierarchical_memory_limit", limit);
		if (do_swap_account)
			cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
	}
K
KOSAKI Motohiro 已提交
4200

K
KAMEZAWA Hiroyuki 已提交
4201
	memset(&mystat, 0, sizeof(mystat));
4202
	mem_cgroup_get_total_stat(memcg, &mystat);
4203 4204 4205
	for (i = 0; i < NR_MCS_STAT; i++) {
		if (i == MCS_SWAP && !do_swap_account)
			continue;
K
KAMEZAWA Hiroyuki 已提交
4206
		cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
4207
	}
K
KAMEZAWA Hiroyuki 已提交
4208

K
KOSAKI Motohiro 已提交
4209 4210 4211 4212 4213 4214 4215 4216 4217
#ifdef CONFIG_DEBUG_VM
	{
		int nid, zid;
		struct mem_cgroup_per_zone *mz;
		unsigned long recent_rotated[2] = {0, 0};
		unsigned long recent_scanned[2] = {0, 0};

		for_each_online_node(nid)
			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
4218
				mz = mem_cgroup_zoneinfo(memcg, nid, zid);
K
KOSAKI Motohiro 已提交
4219 4220 4221 4222 4223 4224 4225 4226 4227 4228 4229 4230 4231 4232 4233 4234 4235

				recent_rotated[0] +=
					mz->reclaim_stat.recent_rotated[0];
				recent_rotated[1] +=
					mz->reclaim_stat.recent_rotated[1];
				recent_scanned[0] +=
					mz->reclaim_stat.recent_scanned[0];
				recent_scanned[1] +=
					mz->reclaim_stat.recent_scanned[1];
			}
		cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
		cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
		cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
		cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
	}
#endif

4236 4237 4238
	return 0;
}

K
KOSAKI Motohiro 已提交
4239 4240 4241 4242
static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);

4243
	return mem_cgroup_swappiness(memcg);
K
KOSAKI Motohiro 已提交
4244 4245 4246 4247 4248 4249 4250
}

static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
				       u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
	struct mem_cgroup *parent;
4251

K
KOSAKI Motohiro 已提交
4252 4253 4254 4255 4256 4257 4258
	if (val > 100)
		return -EINVAL;

	if (cgrp->parent == NULL)
		return -EINVAL;

	parent = mem_cgroup_from_cont(cgrp->parent);
4259 4260 4261

	cgroup_lock();

K
KOSAKI Motohiro 已提交
4262 4263
	/* If under hierarchy, only empty-root can set this value */
	if ((parent->use_hierarchy) ||
4264 4265
	    (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
		cgroup_unlock();
K
KOSAKI Motohiro 已提交
4266
		return -EINVAL;
4267
	}
K
KOSAKI Motohiro 已提交
4268 4269 4270

	memcg->swappiness = val;

4271 4272
	cgroup_unlock();

K
KOSAKI Motohiro 已提交
4273 4274 4275
	return 0;
}

4276 4277 4278 4279 4280 4281 4282 4283
static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
{
	struct mem_cgroup_threshold_ary *t;
	u64 usage;
	int i;

	rcu_read_lock();
	if (!swap)
4284
		t = rcu_dereference(memcg->thresholds.primary);
4285
	else
4286
		t = rcu_dereference(memcg->memsw_thresholds.primary);
4287 4288 4289 4290 4291 4292 4293 4294 4295 4296 4297

	if (!t)
		goto unlock;

	usage = mem_cgroup_usage(memcg, swap);

	/*
	 * current_threshold points to threshold just below usage.
	 * If it's not true, a threshold was crossed after last
	 * call of __mem_cgroup_threshold().
	 */
4298
	i = t->current_threshold;
4299 4300 4301 4302 4303 4304 4305 4306 4307 4308 4309 4310 4311 4312 4313 4314 4315 4316 4317 4318 4319 4320 4321

	/*
	 * Iterate backward over array of thresholds starting from
	 * current_threshold and check if a threshold is crossed.
	 * If none of thresholds below usage is crossed, we read
	 * only one element of the array here.
	 */
	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
		eventfd_signal(t->entries[i].eventfd, 1);

	/* i = current_threshold + 1 */
	i++;

	/*
	 * Iterate forward over array of thresholds starting from
	 * current_threshold+1 and check if a threshold is crossed.
	 * If none of thresholds above usage is crossed, we read
	 * only one element of the array here.
	 */
	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
		eventfd_signal(t->entries[i].eventfd, 1);

	/* Update current_threshold */
4322
	t->current_threshold = i - 1;
4323 4324 4325 4326 4327 4328
unlock:
	rcu_read_unlock();
}

static void mem_cgroup_threshold(struct mem_cgroup *memcg)
{
	while (memcg) {
		__mem_cgroup_threshold(memcg, false);
		if (do_swap_account)
			__mem_cgroup_threshold(memcg, true);

		memcg = parent_mem_cgroup(memcg);
	}
}

static int compare_thresholds(const void *a, const void *b)
{
	const struct mem_cgroup_threshold *_a = a;
	const struct mem_cgroup_threshold *_b = b;

	return _a->threshold - _b->threshold;
}
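
/*
 * Usage sketch (illustrative): memory thresholds are armed from user space
 * through cgroup.event_control with an eventfd, roughly:
 *
 *	efd = eventfd(0, 0);
 *	ufd = open(".../memory.usage_in_bytes", O_RDONLY);
 *	cfd = open(".../cgroup.event_control", O_WRONLY);
 *	dprintf(cfd, "%d %d %llu", efd, ufd, threshold);
 *
 * The write lands in mem_cgroup_usage_register_event() below, and crossing
 * the threshold in either direction signals the eventfd via the code above.
 */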

static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
{
	struct mem_cgroup_eventfd_list *ev;

4350
	list_for_each_entry(ev, &memcg->oom_notify, list)
K
KAMEZAWA Hiroyuki 已提交
4351 4352 4353 4354
		eventfd_signal(ev->eventfd, 1);
	return 0;
}

4355
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
K
KAMEZAWA Hiroyuki 已提交
4356
{
K
KAMEZAWA Hiroyuki 已提交
4357 4358
	struct mem_cgroup *iter;

4359
	for_each_mem_cgroup_tree(iter, memcg)
K
KAMEZAWA Hiroyuki 已提交
4360
		mem_cgroup_oom_notify_cb(iter);
K
KAMEZAWA Hiroyuki 已提交
4361 4362 4363 4364
}

static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
	struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
4365 4366
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4367 4368
	struct mem_cgroup_thresholds *thresholds;
	struct mem_cgroup_threshold_ary *new;
4369 4370
	int type = MEMFILE_TYPE(cft->private);
	u64 threshold, usage;
4371
	int i, size, ret;
4372 4373 4374 4375 4376 4377

	ret = res_counter_memparse_write_strategy(args, &threshold);
	if (ret)
		return ret;

	mutex_lock(&memcg->thresholds_lock);
4378

4379
	if (type == _MEM)
4380
		thresholds = &memcg->thresholds;
4381
	else if (type == _MEMSWAP)
4382
		thresholds = &memcg->memsw_thresholds;
4383 4384 4385 4386 4387 4388
	else
		BUG();

	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);

	/* Check if a threshold crossed before adding a new one */
4389
	if (thresholds->primary)
4390 4391
		__mem_cgroup_threshold(memcg, type == _MEMSWAP);

4392
	size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4393 4394

	/* Allocate memory for new array of thresholds */
4395
	new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
4396
			GFP_KERNEL);
4397
	if (!new) {
4398 4399 4400
		ret = -ENOMEM;
		goto unlock;
	}
4401
	new->size = size;
4402 4403

	/* Copy thresholds (if any) to new array */
4404 4405
	if (thresholds->primary) {
		memcpy(new->entries, thresholds->primary->entries, (size - 1) *
4406
				sizeof(struct mem_cgroup_threshold));
4407 4408
	}

4409
	/* Add new threshold */
4410 4411
	new->entries[size - 1].eventfd = eventfd;
	new->entries[size - 1].threshold = threshold;
4412 4413

	/* Sort thresholds. Registering of new threshold isn't time-critical */
4414
	sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
4415 4416 4417
			compare_thresholds, NULL);

	/* Find current threshold */
4418
	new->current_threshold = -1;
4419
	for (i = 0; i < size; i++) {
4420
		if (new->entries[i].threshold < usage) {
4421
			/*
4422 4423
			 * new->current_threshold will not be used until
			 * rcu_assign_pointer(), so it's safe to increment
4424 4425
			 * it here.
			 */
4426
			++new->current_threshold;
4427 4428 4429
		}
	}

4430 4431 4432 4433 4434
	/* Free old spare buffer and save old primary buffer as spare */
	kfree(thresholds->spare);
	thresholds->spare = thresholds->primary;

	rcu_assign_pointer(thresholds->primary, new);
4435

4436
	/* To be sure that nobody uses thresholds */
4437 4438 4439 4440 4441 4442 4443 4444
	synchronize_rcu();

unlock:
	mutex_unlock(&memcg->thresholds_lock);

	return ret;
}

4445
static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
K
KAMEZAWA Hiroyuki 已提交
4446
	struct cftype *cft, struct eventfd_ctx *eventfd)
4447 4448
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4449 4450
	struct mem_cgroup_thresholds *thresholds;
	struct mem_cgroup_threshold_ary *new;
4451 4452
	int type = MEMFILE_TYPE(cft->private);
	u64 usage;
4453
	int i, j, size;
4454 4455 4456

	mutex_lock(&memcg->thresholds_lock);
	if (type == _MEM)
4457
		thresholds = &memcg->thresholds;
4458
	else if (type == _MEMSWAP)
4459
		thresholds = &memcg->memsw_thresholds;
4460 4461 4462
	else
		BUG();

4463 4464 4465
	if (!thresholds->primary)
		goto unlock;

4466 4467 4468 4469 4470 4471
	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);

	/* Check if a threshold crossed before removing */
	__mem_cgroup_threshold(memcg, type == _MEMSWAP);

	/* Calculate new number of threshold */
4472 4473 4474
	size = 0;
	for (i = 0; i < thresholds->primary->size; i++) {
		if (thresholds->primary->entries[i].eventfd != eventfd)
4475 4476 4477
			size++;
	}

4478
	new = thresholds->spare;
4479

4480 4481
	/* Set thresholds array to NULL if we don't have thresholds */
	if (!size) {
4482 4483
		kfree(new);
		new = NULL;
4484
		goto swap_buffers;
4485 4486
	}

4487
	new->size = size;
4488 4489

	/* Copy thresholds and find current threshold */
4490 4491 4492
	new->current_threshold = -1;
	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
		if (thresholds->primary->entries[i].eventfd == eventfd)
4493 4494
			continue;

4495 4496
		new->entries[j] = thresholds->primary->entries[i];
		if (new->entries[j].threshold < usage) {
4497
			/*
4498
			 * new->current_threshold will not be used
4499 4500 4501
			 * until rcu_assign_pointer(), so it's safe to increment
			 * it here.
			 */
4502
			++new->current_threshold;
4503 4504 4505 4506
		}
		j++;
	}

4507
swap_buffers:
4508 4509
	/* Swap primary and spare array */
	thresholds->spare = thresholds->primary;
4510 4511 4512 4513 4514 4515
	/* If all events are unregistered, free the spare array */
	if (!new) {
		kfree(thresholds->spare);
		thresholds->spare = NULL;
	}

4516
	rcu_assign_pointer(thresholds->primary, new);
4517

4518
	/* To be sure that nobody uses thresholds */
4519
	synchronize_rcu();
4520
unlock:
4521 4522
	mutex_unlock(&memcg->thresholds_lock);
}
4523

K
KAMEZAWA Hiroyuki 已提交
4524 4525 4526 4527 4528 4529 4530 4531 4532 4533 4534 4535
static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
	struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
	struct mem_cgroup_eventfd_list *event;
	int type = MEMFILE_TYPE(cft->private);

	BUG_ON(type != _OOM_TYPE);
	event = kmalloc(sizeof(*event),	GFP_KERNEL);
	if (!event)
		return -ENOMEM;

4536
	spin_lock(&memcg_oom_lock);
K
KAMEZAWA Hiroyuki 已提交
4537 4538 4539 4540 4541

	event->eventfd = eventfd;
	list_add(&event->list, &memcg->oom_notify);

	/* already in OOM ? */
4542
	if (atomic_read(&memcg->under_oom))
K
KAMEZAWA Hiroyuki 已提交
4543
		eventfd_signal(eventfd, 1);
4544
	spin_unlock(&memcg_oom_lock);
K
KAMEZAWA Hiroyuki 已提交
4545 4546 4547 4548

	return 0;
}

4549
static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
K
KAMEZAWA Hiroyuki 已提交
4550 4551
	struct cftype *cft, struct eventfd_ctx *eventfd)
{
4552
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
K
KAMEZAWA Hiroyuki 已提交
4553 4554 4555 4556 4557
	struct mem_cgroup_eventfd_list *ev, *tmp;
	int type = MEMFILE_TYPE(cft->private);

	BUG_ON(type != _OOM_TYPE);

4558
	spin_lock(&memcg_oom_lock);
K
KAMEZAWA Hiroyuki 已提交
4559

4560
	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
K
KAMEZAWA Hiroyuki 已提交
4561 4562 4563 4564 4565 4566
		if (ev->eventfd == eventfd) {
			list_del(&ev->list);
			kfree(ev);
		}
	}

4567
	spin_unlock(&memcg_oom_lock);
K
KAMEZAWA Hiroyuki 已提交
4568 4569
}

4570 4571 4572
static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
	struct cftype *cft,  struct cgroup_map_cb *cb)
{
4573
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4574

4575
	cb->fill(cb, "oom_kill_disable", memcg->oom_kill_disable);
4576

4577
	if (atomic_read(&memcg->under_oom))
4578 4579 4580 4581 4582 4583 4584 4585 4586
		cb->fill(cb, "under_oom", 1);
	else
		cb->fill(cb, "under_oom", 0);
	return 0;
}

static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
	struct cftype *cft, u64 val)
{
4587
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4588 4589 4590 4591 4592 4593 4594 4595 4596 4597 4598
	struct mem_cgroup *parent;

	/* cannot set to root cgroup and only 0 and 1 are allowed */
	if (!cgrp->parent || !((val == 0) || (val == 1)))
		return -EINVAL;

	parent = mem_cgroup_from_cont(cgrp->parent);

	cgroup_lock();
	/* oom-kill-disable is a flag for subhierarchy. */
	if ((parent->use_hierarchy) ||
4599
	    (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
4600 4601 4602
		cgroup_unlock();
		return -EINVAL;
	}
	memcg->oom_kill_disable = val;
	if (!val)
		memcg_oom_recover(memcg);
	cgroup_unlock();
	return 0;
}
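
/*
 * Example (illustrative): writing 1 to memory.oom_control disables the OOM
 * killer for this cgroup, so tasks hitting the limit wait for charges to be
 * released instead of being killed. Reading the file reports both flags,
 * e.g.
 *
 *	oom_kill_disable 1
 *	under_oom 0
 *
 * as filled in by mem_cgroup_oom_control_read() above.
 */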

#ifdef CONFIG_NUMA
static const struct file_operations mem_control_numa_stat_file_operations = {
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int mem_control_numa_stat_open(struct inode *unused, struct file *file)
{
	struct cgroup *cont = file->f_dentry->d_parent->d_fsdata;

	file->f_op = &mem_control_numa_stat_file_operations;
	return single_open(file, mem_control_numa_stat_show, cont);
}
#endif /* CONFIG_NUMA */

4626 4627 4628
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss)
{
	/*
	 * Part of this would be better living in a separate allocation
	 * function, leaving us with just the cgroup tree population work.
	 * We, however, depend on state such as network's proto_list that
	 * is only initialized after cgroup creation. I found the least
	 * cumbersome way to deal with it is to defer it all to populate time.
	 */
4636
	return mem_cgroup_sockets_init(cont, ss);
4637 4638
};

4639
static void kmem_cgroup_destroy(struct cgroup *cont)
G
Glauber Costa 已提交
4640
{
4641
	mem_cgroup_sockets_destroy(cont);
G
Glauber Costa 已提交
4642
}
4643 4644 4645 4646 4647
#else
static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss)
{
	return 0;
}
G
Glauber Costa 已提交
4648

4649
static void kmem_cgroup_destroy(struct cgroup *cont)
G
Glauber Costa 已提交
4650 4651
{
}
4652 4653
#endif

B
Balbir Singh 已提交
4654 4655
static struct cftype mem_cgroup_files[] = {
	{
4656
		.name = "usage_in_bytes",
4657
		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
4658
		.read_u64 = mem_cgroup_read,
K
KAMEZAWA Hiroyuki 已提交
4659 4660
		.register_event = mem_cgroup_usage_register_event,
		.unregister_event = mem_cgroup_usage_unregister_event,
B
Balbir Singh 已提交
4661
	},
4662 4663
	{
		.name = "max_usage_in_bytes",
4664
		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
4665
		.trigger = mem_cgroup_reset,
4666 4667
		.read_u64 = mem_cgroup_read,
	},
B
Balbir Singh 已提交
4668
	{
4669
		.name = "limit_in_bytes",
4670
		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
4671
		.write_string = mem_cgroup_write,
4672
		.read_u64 = mem_cgroup_read,
B
Balbir Singh 已提交
4673
	},
4674 4675 4676 4677 4678 4679
	{
		.name = "soft_limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
		.write_string = mem_cgroup_write,
		.read_u64 = mem_cgroup_read,
	},
B
Balbir Singh 已提交
4680 4681
	{
		.name = "failcnt",
4682
		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
4683
		.trigger = mem_cgroup_reset,
4684
		.read_u64 = mem_cgroup_read,
B
Balbir Singh 已提交
4685
	},
4686 4687
	{
		.name = "stat",
4688
		.read_map = mem_control_stat_show,
4689
	},
4690 4691 4692 4693
	{
		.name = "force_empty",
		.trigger = mem_cgroup_force_empty_write,
	},
4694 4695 4696 4697 4698
	{
		.name = "use_hierarchy",
		.write_u64 = mem_cgroup_hierarchy_write,
		.read_u64 = mem_cgroup_hierarchy_read,
	},
K
KOSAKI Motohiro 已提交
4699 4700 4701 4702 4703
	{
		.name = "swappiness",
		.read_u64 = mem_cgroup_swappiness_read,
		.write_u64 = mem_cgroup_swappiness_write,
	},
4704 4705 4706 4707 4708
	{
		.name = "move_charge_at_immigrate",
		.read_u64 = mem_cgroup_move_charge_read,
		.write_u64 = mem_cgroup_move_charge_write,
	},
K
KAMEZAWA Hiroyuki 已提交
4709 4710
	{
		.name = "oom_control",
4711 4712
		.read_map = mem_cgroup_oom_control_read,
		.write_u64 = mem_cgroup_oom_control_write,
K
KAMEZAWA Hiroyuki 已提交
4713 4714 4715 4716
		.register_event = mem_cgroup_oom_register_event,
		.unregister_event = mem_cgroup_oom_unregister_event,
		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
	},
4717 4718 4719 4720
#ifdef CONFIG_NUMA
	{
		.name = "numa_stat",
		.open = mem_control_numa_stat_open,
4721
		.mode = S_IRUGO,
4722 4723
	},
#endif
B
Balbir Singh 已提交
4724 4725
};

4726 4727 4728 4729 4730 4731
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
static struct cftype memsw_cgroup_files[] = {
	{
		.name = "memsw.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
		.read_u64 = mem_cgroup_read,
K
KAMEZAWA Hiroyuki 已提交
4732 4733
		.register_event = mem_cgroup_usage_register_event,
		.unregister_event = mem_cgroup_usage_unregister_event,
4734 4735 4736 4737 4738 4739 4740 4741 4742 4743 4744 4745 4746 4747 4748 4749 4750 4751 4752 4753 4754 4755 4756 4757 4758 4759 4760 4761 4762 4763 4764 4765 4766 4767 4768
	},
	{
		.name = "memsw.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "memsw.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
		.write_string = mem_cgroup_write,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "memsw.failcnt",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
};

static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
{
	if (!do_swap_account)
		return 0;
	return cgroup_add_files(cont, ss, memsw_cgroup_files,
				ARRAY_SIZE(memsw_cgroup_files));
};
#else
static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
{
	return 0;
}
#endif

4769
static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
4770 4771
{
	struct mem_cgroup_per_node *pn;
4772
	struct mem_cgroup_per_zone *mz;
H
Hugh Dickins 已提交
4773
	enum lru_list lru;
4774
	int zone, tmp = node;
	/*
	 * This routine is called for each possible node.
	 * But it's a BUG to call kmalloc() against an offline node.
	 *
	 * TODO: this routine can waste much memory for nodes which will
	 *       never be onlined. It's better to use memory hotplug callback
	 *       function.
	 */
4783 4784
	if (!node_state(node, N_NORMAL_MEMORY))
		tmp = -1;
4785
	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
4786 4787
	if (!pn)
		return 1;
4788 4789 4790

	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
		mz = &pn->zoneinfo[zone];
H
Hugh Dickins 已提交
4791 4792
		for_each_lru(lru)
			INIT_LIST_HEAD(&mz->lruvec.lists[lru]);
4793
		mz->usage_in_excess = 0;
4794
		mz->on_tree = false;
4795
		mz->memcg = memcg;
4796
	}
4797
	memcg->info.nodeinfo[node] = pn;
4798 4799 4800
	return 0;
}

4801
static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
4802
{
4803
	kfree(memcg->info.nodeinfo[node]);
4804 4805
}

4806 4807
static struct mem_cgroup *mem_cgroup_alloc(void)
{
4808
	struct mem_cgroup *memcg;
4809
	int size = sizeof(struct mem_cgroup);
4810

4811
	/* Can be very big if MAX_NUMNODES is very big */
4812
	if (size < PAGE_SIZE)
4813
		memcg = kzalloc(size, GFP_KERNEL);
4814
	else
4815
		memcg = vzalloc(size);
4816

4817
	if (!memcg)
4818 4819
		return NULL;

4820 4821
	memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
	if (!memcg->stat)
4822
		goto out_free;
4823 4824
	spin_lock_init(&memcg->pcp_counter_lock);
	return memcg;
4825 4826 4827

out_free:
	if (size < PAGE_SIZE)
4828
		kfree(memcg);
4829
	else
4830
		vfree(memcg);
4831
	return NULL;
4832 4833
}

4834 4835 4836 4837 4838 4839 4840 4841 4842 4843 4844 4845 4846 4847 4848 4849 4850 4851 4852 4853 4854
/*
 * Helpers for freeing a vzalloc()ed mem_cgroup by RCU,
 * but in process context.  The work_freeing structure is overlaid
 * on the rcu_freeing structure, which itself is overlaid on memsw.
 */
static void vfree_work(struct work_struct *work)
{
	struct mem_cgroup *memcg;

	memcg = container_of(work, struct mem_cgroup, work_freeing);
	vfree(memcg);
}
static void vfree_rcu(struct rcu_head *rcu_head)
{
	struct mem_cgroup *memcg;

	memcg = container_of(rcu_head, struct mem_cgroup, rcu_freeing);
	INIT_WORK(&memcg->work_freeing, vfree_work);
	schedule_work(&memcg->work_freeing);
}

/*
 * When destroying a mem_cgroup, references from swap_cgroup can remain.
 * (scanning them all at force_empty is too costly...)
 *
 * Instead of clearing all references at force_empty, we remember
 * the number of references from swap_cgroup and free the mem_cgroup when
 * it goes down to 0.
 *
 * Removal of cgroup itself succeeds regardless of refs from swap.
 */

4866
static void __mem_cgroup_free(struct mem_cgroup *memcg)
4867
{
K
KAMEZAWA Hiroyuki 已提交
4868 4869
	int node;

4870 4871
	mem_cgroup_remove_from_trees(memcg);
	free_css_id(&mem_cgroup_subsys, &memcg->css);
K
KAMEZAWA Hiroyuki 已提交
4872

B
Bob Liu 已提交
4873
	for_each_node(node)
4874
		free_mem_cgroup_per_zone_info(memcg, node);
K
KAMEZAWA Hiroyuki 已提交
4875

4876
	free_percpu(memcg->stat);
4877
	if (sizeof(struct mem_cgroup) < PAGE_SIZE)
4878
		kfree_rcu(memcg, rcu_freeing);
4879
	else
4880
		call_rcu(&memcg->rcu_freeing, vfree_rcu);
4881 4882
}

4883
static void mem_cgroup_get(struct mem_cgroup *memcg)
4884
{
4885
	atomic_inc(&memcg->refcnt);
4886 4887
}

4888
static void __mem_cgroup_put(struct mem_cgroup *memcg, int count)
4889
{
4890 4891 4892
	if (atomic_sub_and_test(count, &memcg->refcnt)) {
		struct mem_cgroup *parent = parent_mem_cgroup(memcg);
		__mem_cgroup_free(memcg);
4893 4894 4895
		if (parent)
			mem_cgroup_put(parent);
	}
4896 4897
}

4898
static void mem_cgroup_put(struct mem_cgroup *memcg)
4899
{
4900
	__mem_cgroup_put(memcg, 1);
4901 4902
}

4903 4904 4905
/*
 * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled.
 */
G
Glauber Costa 已提交
4906
struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
4907
{
4908
	if (!memcg->res.parent)
4909
		return NULL;
4910
	return mem_cgroup_from_res_counter(memcg->res.parent, res);
4911
}
G
Glauber Costa 已提交
4912
EXPORT_SYMBOL(parent_mem_cgroup);
4913

4914 4915 4916
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
static void __init enable_swap_cgroup(void)
{
4917
	if (!mem_cgroup_disabled() && really_do_swap_account)
4918 4919 4920 4921 4922 4923 4924 4925
		do_swap_account = 1;
}
#else
static void __init enable_swap_cgroup(void)
{
}
#endif

4926 4927 4928 4929 4930 4931
static int mem_cgroup_soft_limit_tree_init(void)
{
	struct mem_cgroup_tree_per_node *rtpn;
	struct mem_cgroup_tree_per_zone *rtpz;
	int tmp, node, zone;

B
Bob Liu 已提交
4932
	for_each_node(node) {
4933 4934 4935 4936 4937
		tmp = node;
		if (!node_state(node, N_NORMAL_MEMORY))
			tmp = -1;
		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
		if (!rtpn)
4938
			goto err_cleanup;
4939 4940 4941 4942 4943 4944 4945 4946 4947 4948

		soft_limit_tree.rb_tree_per_node[node] = rtpn;

		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
			rtpz = &rtpn->rb_tree_per_zone[zone];
			rtpz->rb_root = RB_ROOT;
			spin_lock_init(&rtpz->lock);
		}
	}
	return 0;
4949 4950

err_cleanup:
B
Bob Liu 已提交
4951
	for_each_node(node) {
4952 4953 4954 4955 4956 4957 4958
		if (!soft_limit_tree.rb_tree_per_node[node])
			break;
		kfree(soft_limit_tree.rb_tree_per_node[node]);
		soft_limit_tree.rb_tree_per_node[node] = NULL;
	}
	return 1;

4959 4960
}

L
Li Zefan 已提交
4961
static struct cgroup_subsys_state * __ref
4962
mem_cgroup_create(struct cgroup *cont)
B
Balbir Singh 已提交
4963
{
4964
	struct mem_cgroup *memcg, *parent;
K
KAMEZAWA Hiroyuki 已提交
4965
	long error = -ENOMEM;
4966
	int node;
B
Balbir Singh 已提交
4967

4968 4969
	memcg = mem_cgroup_alloc();
	if (!memcg)
K
KAMEZAWA Hiroyuki 已提交
4970
		return ERR_PTR(error);
4971

B
Bob Liu 已提交
4972
	for_each_node(node)
4973
		if (alloc_mem_cgroup_per_zone_info(memcg, node))
4974
			goto free_out;
4975

4976
	/* root ? */
4977
	if (cont->parent == NULL) {
4978
		int cpu;
4979
		enable_swap_cgroup();
4980
		parent = NULL;
4981 4982
		if (mem_cgroup_soft_limit_tree_init())
			goto free_out;
4983
		root_mem_cgroup = memcg;
4984 4985 4986 4987 4988
		for_each_possible_cpu(cpu) {
			struct memcg_stock_pcp *stock =
						&per_cpu(memcg_stock, cpu);
			INIT_WORK(&stock->work, drain_local_stock);
		}
4989
		hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
4990
	} else {
4991
		parent = mem_cgroup_from_cont(cont->parent);
4992 4993
		memcg->use_hierarchy = parent->use_hierarchy;
		memcg->oom_kill_disable = parent->oom_kill_disable;
4994
	}
4995

4996
	if (parent && parent->use_hierarchy) {
4997 4998
		res_counter_init(&memcg->res, &parent->res);
		res_counter_init(&memcg->memsw, &parent->memsw);
4999 5000 5001 5002 5003 5004 5005
		/*
		 * We increment refcnt of the parent to ensure that we can
		 * safely access it on res_counter_charge/uncharge.
		 * This refcnt will be decremented when freeing this
		 * mem_cgroup(see mem_cgroup_put).
		 */
		mem_cgroup_get(parent);
5006
	} else {
5007 5008
		res_counter_init(&memcg->res, NULL);
		res_counter_init(&memcg->memsw, NULL);
5009
	}
5010 5011
	memcg->last_scanned_node = MAX_NUMNODES;
	INIT_LIST_HEAD(&memcg->oom_notify);
5012

K
KOSAKI Motohiro 已提交
5013
	if (parent)
5014 5015 5016 5017
		memcg->swappiness = mem_cgroup_swappiness(parent);
	atomic_set(&memcg->refcnt, 1);
	memcg->move_charge_at_immigrate = 0;
	mutex_init(&memcg->thresholds_lock);
5018
	spin_lock_init(&memcg->move_lock);
5019
	return &memcg->css;
5020
free_out:
5021
	__mem_cgroup_free(memcg);
K
KAMEZAWA Hiroyuki 已提交
5022
	return ERR_PTR(error);
B
Balbir Singh 已提交
5023 5024
}

5025
static int mem_cgroup_pre_destroy(struct cgroup *cont)
5026
{
5027
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
5028

5029
	return mem_cgroup_force_empty(memcg, false);
5030 5031
}

5032
static void mem_cgroup_destroy(struct cgroup *cont)
B
Balbir Singh 已提交
5033
{
5034
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
5035

5036
	kmem_cgroup_destroy(cont);
G
Glauber Costa 已提交
5037

5038
	mem_cgroup_put(memcg);
B
Balbir Singh 已提交
5039 5040 5041 5042 5043
}

static int mem_cgroup_populate(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
5044 5045 5046 5047 5048 5049 5050
	int ret;

	ret = cgroup_add_files(cont, ss, mem_cgroup_files,
				ARRAY_SIZE(mem_cgroup_files));

	if (!ret)
		ret = register_memsw_files(cont, ss);
5051 5052 5053 5054

	if (!ret)
		ret = register_kmem_files(cont, ss);

5055
	return ret;
B
Balbir Singh 已提交
5056 5057
}

5058
#ifdef CONFIG_MMU
5059
/* Handlers for move charge at task migration. */
5060 5061
#define PRECHARGE_COUNT_AT_ONCE	256
static int mem_cgroup_do_precharge(unsigned long count)
5062
{
5063 5064
	int ret = 0;
	int batch_count = PRECHARGE_COUNT_AT_ONCE;
5065
	struct mem_cgroup *memcg = mc.to;
5066

5067
	if (mem_cgroup_is_root(memcg)) {
5068 5069 5070 5071 5072 5073 5074 5075
		mc.precharge += count;
		/* we don't need css_get for root */
		return ret;
	}
	/* try to charge at once */
	if (count > 1) {
		struct res_counter *dummy;
		/*
5076
		 * "memcg" cannot be under rmdir() because we've already checked
5077 5078 5079 5080
		 * by cgroup_lock_live_cgroup() that it is not removed and we
		 * are still under the same cgroup_mutex. So we can postpone
		 * css_get().
		 */
5081
		if (res_counter_charge(&memcg->res, PAGE_SIZE * count, &dummy))
5082
			goto one_by_one;
5083
		if (do_swap_account && res_counter_charge(&memcg->memsw,
5084
						PAGE_SIZE * count, &dummy)) {
5085
			res_counter_uncharge(&memcg->res, PAGE_SIZE * count);
5086 5087 5088 5089 5090 5091 5092 5093 5094 5095 5096 5097 5098 5099 5100 5101
			goto one_by_one;
		}
		mc.precharge += count;
		return ret;
	}
one_by_one:
	/* fall back to one by one charge */
	while (count--) {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		if (!batch_count--) {
			batch_count = PRECHARGE_COUNT_AT_ONCE;
			cond_resched();
		}
5102 5103
		ret = __mem_cgroup_try_charge(NULL,
					GFP_KERNEL, 1, &memcg, false);
5104
		if (ret)
5105
			/* mem_cgroup_clear_mc() will do uncharge later */
5106
			return ret;
5107 5108
		mc.precharge++;
	}
5109 5110 5111 5112
	return ret;
}

/**
 * get_mctgt_type - get the target type of a moving charge
 * @vma: the vma the pte to be checked belongs to
 * @addr: the address corresponding to the pte to be checked
 * @ptent: the pte to be checked
 * @target: the pointer where the target page or swap entry will be
 *	stored (can be NULL)
 *
 * Returns
 *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
 *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
 *     move charge. If @target is not NULL, the page is stored in target->page
 *     with an extra refcount taken (callers should handle it).
 *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
 *     target for charge migration. If @target is not NULL, the entry is
 *     stored in target->ent.
 *
 * Called with pte lock held.
 */
union mc_target {
	struct page	*page;
5132
	swp_entry_t	ent;
5133 5134 5135
};

enum mc_target_type {
5136
	MC_TARGET_NONE = 0,
5137
	MC_TARGET_PAGE,
5138
	MC_TARGET_SWAP,
5139 5140
};

D
Daisuke Nishimura 已提交
5141 5142
static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
						unsigned long addr, pte_t ptent)
5143
{
D
Daisuke Nishimura 已提交
5144
	struct page *page = vm_normal_page(vma, addr, ptent);
5145

D
Daisuke Nishimura 已提交
5146 5147 5148 5149
	if (!page || !page_mapped(page))
		return NULL;
	if (PageAnon(page)) {
		/* we don't move shared anon */
5150
		if (!move_anon() || page_mapcount(page) > 2)
D
Daisuke Nishimura 已提交
5151
			return NULL;
5152 5153
	} else if (!move_file())
		/* we ignore mapcount for file pages */
D
Daisuke Nishimura 已提交
5154 5155 5156 5157 5158 5159 5160 5161 5162 5163 5164 5165 5166 5167 5168 5169 5170 5171
		return NULL;
	if (!get_page_unless_zero(page))
		return NULL;

	return page;
}

static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	int usage_count;
	struct page *page = NULL;
	swp_entry_t ent = pte_to_swp_entry(ptent);

	if (!move_anon() || non_swap_entry(ent))
		return NULL;
	usage_count = mem_cgroup_count_swap_user(ent, &page);
	if (usage_count > 1) { /* we don't move shared anon */
5172 5173
		if (page)
			put_page(page);
D
Daisuke Nishimura 已提交
5174
		return NULL;
5175
	}
D
Daisuke Nishimura 已提交
5176 5177 5178 5179 5180 5181
	if (do_swap_account)
		entry->val = ent.val;

	return page;
}

5182 5183 5184 5185 5186 5187 5188 5189 5190 5191 5192 5193 5194 5195 5196 5197 5198 5199 5200 5201 5202
static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	struct page *page = NULL;
	struct inode *inode;
	struct address_space *mapping;
	pgoff_t pgoff;

	if (!vma->vm_file) /* anonymous vma */
		return NULL;
	if (!move_file())
		return NULL;

	inode = vma->vm_file->f_path.dentry->d_inode;
	mapping = vma->vm_file->f_mapping;
	if (pte_none(ptent))
		pgoff = linear_page_index(vma, addr);
	else /* pte_file(ptent) is true */
		pgoff = pte_to_pgoff(ptent);

	/* page is moved even if it's not RSS of this task(page-faulted). */
5203 5204 5205 5206 5207 5208
	page = find_get_page(mapping, pgoff);

#ifdef CONFIG_SWAP
	/* shmem/tmpfs may report page out on swap: account for that too. */
	if (radix_tree_exceptional_entry(page)) {
		swp_entry_t swap = radix_to_swp_entry(page);
5209
		if (do_swap_account)
5210 5211
			*entry = swap;
		page = find_get_page(&swapper_space, swap.val);
5212
	}
5213
#endif
5214 5215 5216
	return page;
}

5217
static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
		unsigned long addr, pte_t ptent, union mc_target *target)
{
	struct page *page = NULL;
	struct page_cgroup *pc;
	enum mc_target_type ret = MC_TARGET_NONE;
	swp_entry_t ent = { .val = 0 };

	if (pte_present(ptent))
		page = mc_handle_present_pte(vma, addr, ptent);
	else if (is_swap_pte(ptent))
		page = mc_handle_swap_pte(vma, addr, ptent, &ent);
	else if (pte_none(ptent) || pte_file(ptent))
		page = mc_handle_file_pte(vma, addr, ptent, &ent);

	if (!page && !ent.val)
		return ret;
	if (page) {
		pc = lookup_page_cgroup(page);
		/*
		 * Do only a loose check without the page_cgroup lock;
		 * mem_cgroup_move_account() rechecks the pc under the lock.
		 */
		if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
			ret = MC_TARGET_PAGE;
			if (target)
				target->page = page;
		}
		if (!ret || !target)
			put_page(page);
	}
	/* There is a swap entry and a page doesn't exist or isn't charged */
	if (ent.val && !ret &&
			css_id(&mc.from->css) == lookup_swap_cgroup_id(ent)) {
		ret = MC_TARGET_SWAP;
		if (target)
			target->ent = ent;
	}
	return ret;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * We don't consider swapping or file mapped pages because THP does not
 * support them for now.
 * Caller should make sure that pmd_trans_huge(pmd) is true.
 */
static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
		unsigned long addr, pmd_t pmd, union mc_target *target)
{
	struct page *page = NULL;
	struct page_cgroup *pc;
	enum mc_target_type ret = MC_TARGET_NONE;

	page = pmd_page(pmd);
	VM_BUG_ON(!page || !PageHead(page));
	if (!move_anon())
		return ret;
	pc = lookup_page_cgroup(page);
	if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
		ret = MC_TARGET_PAGE;
		if (target) {
			get_page(page);
			target->page = page;
		}
	}
	return ret;
}
#else
static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
		unsigned long addr, pmd_t pmd, union mc_target *target)
{
	return MC_TARGET_NONE;
}
#endif

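/*
 * Page table walk callback: count how many pages in this pmd range would be
 * moved, so that mem_cgroup_precharge_mc() can pre-charge them to mc.to.
 * A transparent huge page counts as HPAGE_PMD_NR pages.
 */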
static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
					unsigned long addr, unsigned long end,
					struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_trans_huge_lock(pmd, vma) == 1) {
		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
			mc.precharge += HPAGE_PMD_NR;
		spin_unlock(&vma->vm_mm->page_table_lock);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		if (get_mctgt_type(vma, addr, *pte, NULL))
			mc.precharge++;	/* increment precharge temporarily */
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	return 0;
}

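/*
 * Walk the whole address space of @mm and return the number of pages that
 * moving charges would have to pre-charge. mc.precharge is used as a scratch
 * counter during the walk and is reset before returning.
 */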
static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
{
	unsigned long precharge;
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		struct mm_walk mem_cgroup_count_precharge_walk = {
			.pmd_entry = mem_cgroup_count_precharge_pte_range,
			.mm = mm,
			.private = vma,
		};
		if (is_vm_hugetlb_page(vma))
			continue;
		walk_page_range(vma->vm_start, vma->vm_end,
					&mem_cgroup_count_precharge_walk);
	}
	up_read(&mm->mmap_sem);

	precharge = mc.precharge;
	mc.precharge = 0;

	return precharge;
}

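/*
 * Pre-charge mc.to with enough charges to cover every page we expect to
 * move; mem_cgroup_move_charge() consumes these from mc.precharge later.
 */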
static int mem_cgroup_precharge_mc(struct mm_struct *mm)
{
	unsigned long precharge = mem_cgroup_count_precharge(mm);

	VM_BUG_ON(mc.moving_task);
	mc.moving_task = current;
	return mem_cgroup_do_precharge(precharge);
}

/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
static void __mem_cgroup_clear_mc(void)
{
	struct mem_cgroup *from = mc.from;
	struct mem_cgroup *to = mc.to;

	/* we must uncharge all the leftover precharges from mc.to */
	if (mc.precharge) {
		__mem_cgroup_cancel_charge(mc.to, mc.precharge);
		mc.precharge = 0;
	}
	/*
	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
	 * we must uncharge here.
	 */
	if (mc.moved_charge) {
		__mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
		mc.moved_charge = 0;
	}
	/* we must fixup refcnts and charges */
	if (mc.moved_swap) {
		/* uncharge swap account from the old cgroup */
		if (!mem_cgroup_is_root(mc.from))
			res_counter_uncharge(&mc.from->memsw,
						PAGE_SIZE * mc.moved_swap);
		__mem_cgroup_put(mc.from, mc.moved_swap);

		if (!mem_cgroup_is_root(mc.to)) {
			/*
			 * we charged both to->res and to->memsw, so we should
			 * uncharge to->res.
			 */
			res_counter_uncharge(&mc.to->res,
						PAGE_SIZE * mc.moved_swap);
		}
		/* we've already done mem_cgroup_get(mc.to) */
		mc.moved_swap = 0;
	}
	memcg_oom_recover(from);
	memcg_oom_recover(to);
	wake_up_all(&mc.waitq);
}

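/*
 * Fully tear down the move-charge state: drop leftover precharges, wake up
 * waiters, and reset mc.from/mc.to so that a new migration can start.
 */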
static void mem_cgroup_clear_mc(void)
{
	struct mem_cgroup *from = mc.from;

	/*
	 * we must clear moving_task before waking up waiters at the end of
	 * task migration.
	 */
	mc.moving_task = NULL;
	__mem_cgroup_clear_mc();
	spin_lock(&mc.lock);
	mc.from = NULL;
	mc.to = NULL;
	spin_unlock(&mc.lock);
	mem_cgroup_end_move(from);
}

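/*
 * cgroup "can_attach" callback: if charge moving is enabled on the target
 * cgroup, record the source and destination in mc and pre-charge the
 * destination for the pages owned by the task's mm. A non-zero return
 * value vetoes the attach.
 */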
static int mem_cgroup_can_attach(struct cgroup *cgroup,
				 struct cgroup_taskset *tset)
{
	struct task_struct *p = cgroup_taskset_first(tset);
	int ret = 0;
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgroup);

	if (memcg->move_charge_at_immigrate) {
		struct mm_struct *mm;
		struct mem_cgroup *from = mem_cgroup_from_task(p);

		VM_BUG_ON(from == memcg);

		mm = get_task_mm(p);
		if (!mm)
			return 0;
		/* We move charges only when we move an owner of the mm */
		if (mm->owner == p) {
			VM_BUG_ON(mc.from);
			VM_BUG_ON(mc.to);
			VM_BUG_ON(mc.precharge);
			VM_BUG_ON(mc.moved_charge);
			VM_BUG_ON(mc.moved_swap);
			mem_cgroup_start_move(from);
			spin_lock(&mc.lock);
			mc.from = from;
			mc.to = memcg;
			spin_unlock(&mc.lock);
			/* We set mc.moving_task later */

			ret = mem_cgroup_precharge_mc(mm);
			if (ret)
				mem_cgroup_clear_mc();
		}
		mmput(mm);
	}
	return ret;
}

static void mem_cgroup_cancel_attach(struct cgroup *cgroup,
				     struct cgroup_taskset *tset)
{
	mem_cgroup_clear_mc();
}

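/*
 * Page table walk callback that does the actual charge moving for one pmd
 * range: huge pages are moved as a whole under the page table lock, regular
 * ptes one by one, consuming mc.precharge as we go.
 */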
static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mm_walk *walk)
{
	int ret = 0;
	struct vm_area_struct *vma = walk->private;
	pte_t *pte;
	spinlock_t *ptl;
	enum mc_target_type target_type;
	union mc_target target;
	struct page *page;
	struct page_cgroup *pc;

	/*
	 * We don't take compound_lock() here but no race with splitting thp
	 * happens because:
	 *  - if pmd_trans_huge_lock() returns 1, the relevant thp is not
	 *    under splitting, which means there's no concurrent thp split,
	 *  - if another thread runs into split_huge_page() just after we
	 *    entered this if-block, the thread must wait for page table lock
	 *    to be unlocked in __split_huge_page_splitting(), where the main
	 *    part of thp split is not executed yet.
	 */
	if (pmd_trans_huge_lock(pmd, vma) == 1) {
		if (mc.precharge < HPAGE_PMD_NR) {
			spin_unlock(&vma->vm_mm->page_table_lock);
			return 0;
		}
		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
		if (target_type == MC_TARGET_PAGE) {
			page = target.page;
			if (!isolate_lru_page(page)) {
				pc = lookup_page_cgroup(page);
				if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
							     pc, mc.from, mc.to,
							     false)) {
					mc.precharge -= HPAGE_PMD_NR;
					mc.moved_charge += HPAGE_PMD_NR;
				}
				putback_lru_page(page);
			}
			put_page(page);
		}
		spin_unlock(&vma->vm_mm->page_table_lock);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
retry:
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; addr += PAGE_SIZE) {
		pte_t ptent = *(pte++);
		swp_entry_t ent;

		if (!mc.precharge)
			break;

		switch (get_mctgt_type(vma, addr, ptent, &target)) {
		case MC_TARGET_PAGE:
			page = target.page;
			if (isolate_lru_page(page))
				goto put;
			pc = lookup_page_cgroup(page);
			if (!mem_cgroup_move_account(page, 1, pc,
						     mc.from, mc.to, false)) {
				mc.precharge--;
				/* we uncharge from mc.from later. */
				mc.moved_charge++;
			}
			putback_lru_page(page);
put:			/* get_mctgt_type() gets the page */
			put_page(page);
			break;
		case MC_TARGET_SWAP:
			ent = target.ent;
			if (!mem_cgroup_move_swap_account(ent,
						mc.from, mc.to, false)) {
				mc.precharge--;
				/* we fixup refcnts and charges later. */
				mc.moved_swap++;
			}
			break;
		default:
			break;
		}
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	if (addr != end) {
		/*
		 * We have consumed all the precharges we got in can_attach().
		 * Now we try to charge one page at a time, but we don't make
		 * any additional charges to mc.to once a charge has failed
		 * in the attach() phase.
		 */
		ret = mem_cgroup_do_precharge(1);
		if (!ret)
			goto retry;
	}

	return ret;
}

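/*
 * Walk @mm and move the charges of all candidate pages from mc.from to
 * mc.to. If mmap_sem is contended, cancel the extra precharges and wake up
 * waiters before retrying, so that a holder of mmap_sem waiting on us
 * cannot deadlock.
 */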
static void mem_cgroup_move_charge(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	lru_add_drain_all();
retry:
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		/*
		 * Someone holding the mmap_sem might be waiting in our
		 * waitq, so we cancel all extra charges, wake up all waiters,
		 * and retry. Because we cancel precharges, we might not be able
		 * to move enough charges, but moving charge is a best-effort
		 * feature anyway, so it wouldn't be a big problem.
		 */
		__mem_cgroup_clear_mc();
		cond_resched();
		goto retry;
	}
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		int ret;
		struct mm_walk mem_cgroup_move_charge_walk = {
			.pmd_entry = mem_cgroup_move_charge_pte_range,
			.mm = mm,
			.private = vma,
		};
		if (is_vm_hugetlb_page(vma))
			continue;
		ret = walk_page_range(vma->vm_start, vma->vm_end,
						&mem_cgroup_move_charge_walk);
		if (ret)
			/*
			 * means we have consumed all precharges and failed in
			 * doing additional charge. Just abandon here.
			 */
			break;
	}
	up_read(&mm->mmap_sem);
}

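/*
 * cgroup "attach" callback: perform the charge move prepared in can_attach()
 * and then clear the move-charge state.
 */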
static void mem_cgroup_move_task(struct cgroup *cont,
				 struct cgroup_taskset *tset)
{
	struct task_struct *p = cgroup_taskset_first(tset);
	struct mm_struct *mm = get_task_mm(p);

	if (mm) {
		if (mc.to)
			mem_cgroup_move_charge(mm);
		put_swap_token(mm);
		mmput(mm);
	}
	if (mc.to)
		mem_cgroup_clear_mc();
}
#else	/* !CONFIG_MMU */
static int mem_cgroup_can_attach(struct cgroup *cgroup,
				 struct cgroup_taskset *tset)
{
	return 0;
}
static void mem_cgroup_cancel_attach(struct cgroup *cgroup,
				     struct cgroup_taskset *tset)
{
}
static void mem_cgroup_move_task(struct cgroup *cont,
				 struct cgroup_taskset *tset)
{
}
#endif

struct cgroup_subsys mem_cgroup_subsys = {
	.name = "memory",
	.subsys_id = mem_cgroup_subsys_id,
	.create = mem_cgroup_create,
	.pre_destroy = mem_cgroup_pre_destroy,
	.destroy = mem_cgroup_destroy,
	.populate = mem_cgroup_populate,
	.can_attach = mem_cgroup_can_attach,
	.cancel_attach = mem_cgroup_cancel_attach,
	.attach = mem_cgroup_move_task,
	.early_init = 0,
	.use_id = 1,
};

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
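/*
 * Boot parameter handler for "swapaccount=": "1" enables swap accounting,
 * "0" disables it; anything else keeps the compile-time default.
 */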
static int __init enable_swap_account(char *s)
{
	/* consider enabled if no parameter or 1 is given */
	if (!strcmp(s, "1"))
		really_do_swap_account = 1;
	else if (!strcmp(s, "0"))
		really_do_swap_account = 0;
	return 1;
}
__setup("swapaccount=", enable_swap_account);

#endif