/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include <linux/tracehook.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include <net/tcp_memcontrol.h>
#include "slab.h"

#include <asm/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

#define MEM_CGROUP_RECLAIM_RETRIES	5
static struct mem_cgroup *root_mem_cgroup __read_mostly;
struct cgroup_subsys_state *mem_cgroup_root_css __read_mostly;

/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
int do_swap_account __read_mostly;
#else
#define do_swap_account		0
#endif

static const char * const mem_cgroup_stat_names[] = {
	"cache",
	"rss",
	"rss_huge",
	"mapped_file",
	"dirty",
	"writeback",
	"swap",
};

static const char * const mem_cgroup_events_names[] = {
	"pgpgin",
	"pgpgout",
	"pgfault",
	"pgmajfault",
};

static const char * const mem_cgroup_lru_names[] = {
	"inactive_anon",
	"active_anon",
	"inactive_file",
	"active_file",
	"unevictable",
};

#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024
#define NUMAINFO_EVENTS_TARGET	1024

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_zone {
	struct rb_root rb_root;
	spinlock_t lock;
};

struct mem_cgroup_tree_per_node {
	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these is stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * register_event() callback will be used to add a new userspace
	 * waiter for changes related to this event.  Use eventfd_signal()
	 * on eventfd to send notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace closes
	 * the eventfd or on cgroup removal.  This callback must be set
	 * if you want to provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below are needed to unregister the event when
	 * userspace closes the eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_t wait;
	struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/* Stuff for move charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON	0x1U
#define MOVE_FILE	0x2U
#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_ANON,
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for OOM notifier */
#define OOM_CONTROL		(0)

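/*
 * A minimal sketch of how these helpers fit together (hypothetical
 * example; the real cftype tables appear later in this file): a
 * control file packs its type and attribute into ->private with
 * MEMFILE_PRIVATE(), and its handlers unpack the two halves again.
 */
static inline void memfile_encoding_example(void)
{
	unsigned long priv = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL);

	WARN_ON(MEMFILE_TYPE(priv) != _OOM_TYPE);	/* type from the high bits */
	WARN_ON(MEMFILE_ATTR(priv) != OOM_CONTROL);	/* attr from the low bits */
}
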
/*
 * The memcg_create_mutex will be held whenever a new cgroup is created.
 * As a consequence, any change that needs to protect against new child cgroups
 * appearing has to hold it as well.
 */
static DEFINE_MUTEX(memcg_create_mutex);

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

/*
 * We restrict the id in the range of [1, 65535], so it can fit into
 * an unsigned short.
 */
#define MEM_CGROUP_ID_MAX	USHRT_MAX

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return memcg->css.id;
}

/*
 * A helper function to get mem_cgroup from ID.  Must be called under
 * rcu_read_lock().  The caller is responsible for calling
 * css_tryget_online() if the mem_cgroup is used for charging. (dropping
 * refcnt from swap can be called against removed memcg.)
 */
static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	struct cgroup_subsys_state *css;

	css = css_from_id(id, &memory_cgrp_subsys);
	return mem_cgroup_from_css(css);
}

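/*
 * Usage sketch for the ID helpers above (hypothetical caller):
 * resolving an id recorded in the swap cgroup map back to a live
 * memcg.  The tryget is what makes the result safe to charge
 * against, since the cgroup may be half-way removed by now.
 */
static inline struct mem_cgroup *example_memcg_from_swap_id(unsigned short id)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_id(id);
	if (memcg && !css_tryget_online(&memcg->css))
		memcg = NULL;			/* cgroup is being removed */
	rcu_read_unlock();

	return memcg;				/* caller must css_put() when done */
}
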
/* Writing them here to avoid exposing memcg's inner layout */
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)

void sock_update_memcg(struct sock *sk)
{
	if (mem_cgroup_sockets_enabled) {
		struct mem_cgroup *memcg;
		struct cg_proto *cg_proto;

		BUG_ON(!sk->sk_prot->proto_cgroup);

		/* Socket cloning can throw us here with sk_cgrp already
		 * filled. It won't however, necessarily happen from
		 * process context. So the test for root memcg given
		 * the current task's memcg won't help us in this case.
		 *
		 * Respecting the original socket's memcg is a better
		 * decision in this case.
		 */
		if (sk->sk_cgrp) {
			BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
			css_get(&sk->sk_cgrp->memcg->css);
			return;
		}

		rcu_read_lock();
		memcg = mem_cgroup_from_task(current);
		cg_proto = sk->sk_prot->proto_cgroup(memcg);
		if (cg_proto && test_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags) &&
		    css_tryget_online(&memcg->css)) {
			sk->sk_cgrp = cg_proto;
		}
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(sock_update_memcg);

void sock_release_memcg(struct sock *sk)
{
	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
		struct mem_cgroup *memcg;
		WARN_ON(!sk->sk_cgrp->memcg);
		memcg = sk->sk_cgrp->memcg;
		css_put(&sk->sk_cgrp->memcg->css);
	}
}

struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg || mem_cgroup_is_root(memcg))
		return NULL;

	return &memcg->tcp_mem;
}
EXPORT_SYMBOL(tcp_proto_cgroup);

#endif

#ifdef CONFIG_MEMCG_KMEM
/*
 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
 * The main reason for not using cgroup id for this:
 *  this works better in sparse environments, where we have a lot of memcgs,
 *  but only a few kmem-limited. Or also, if we have, for instance, 200
 *  memcgs, and none but the 200th is kmem-limited, we'd have to have a
 *  200 entry array for that.
 *
 * The current size of the caches array is stored in memcg_nr_cache_ids. It
 * will double each time we have to increase it.
 */
static DEFINE_IDA(memcg_cache_ida);
int memcg_nr_cache_ids;

/* Protects memcg_nr_cache_ids */
static DECLARE_RWSEM(memcg_cache_ids_sem);

void memcg_get_cache_ids(void)
{
	down_read(&memcg_cache_ids_sem);
}

void memcg_put_cache_ids(void)
{
	up_read(&memcg_cache_ids_sem);
}

/*
 * MIN_SIZE is different from 1, because we would like to avoid going through
 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 * cgroups is a reasonable guess. In the future, it could be a parameter or
 * tunable, but that is strictly not necessary.
 *
 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
 * this constant directly from cgroup, but it is understandable that this is
 * better kept as an internal representation in cgroup.c. In any case, the
 * cgrp_id space is not getting any smaller, and we don't have to necessarily
 * increase ours as well if it increases.
 */
#define MEMCG_CACHES_MIN_SIZE 4
#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
 * conditional to this static branch, we'll have to allow modules that do
 * kmem_cache_alloc and such to see this symbol as well
 */
struct static_key memcg_kmem_enabled_key;
EXPORT_SYMBOL(memcg_kmem_enabled_key);

#endif /* CONFIG_MEMCG_KMEM */

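/*
 * Read-side sketch for the cache ids rwsem above (hypothetical
 * walk_caches() callback): holding the semaphore pins
 * memcg_nr_cache_ids so the per-memcg cache arrays cannot be
 * resized underneath the walker.
 */
#ifdef CONFIG_MEMCG_KMEM
static inline void example_walk_memcg_caches(void (*walk_caches)(int nr))
{
	memcg_get_cache_ids();
	walk_caches(memcg_nr_cache_ids);
	memcg_put_cache_ids();
}
#endif
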
static struct mem_cgroup_per_zone *
mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone)
{
	int nid = zone_to_nid(zone);
	int zid = zone_idx(zone);

	return &memcg->nodeinfo[nid]->zoneinfo[zid];
}

/**
 * mem_cgroup_css_from_page - css of the memcg associated with a page
 * @page: page of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @page is returned.  The returned css remains associated with @page
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 *
 * XXX: The above description of behavior on the default hierarchy isn't
 * strictly true yet as replace_page_cache_page() can modify the
 * association before @page is released even on the default hierarchy;
 * however, the current and planned usages don't mix the two functions
 * and replace_page_cache_page() will soon be updated to make the invariant
 * actually true.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();

	memcg = page->mem_cgroup;

	if (!memcg || !cgroup_on_dfl(memcg->css.cgroup))
		memcg = root_mem_cgroup;

	rcu_read_unlock();
	return &memcg->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it only should be used by callers that
 * do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
	memcg = READ_ONCE(page->mem_cgroup);
	while (memcg && !(memcg->css.flags & CSS_ONLINE))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}

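/*
 * Usage sketch for page_cgroup_ino() (hypothetical procfs-style
 * reader): the value may be 0 or stale by the time it is printed,
 * which such interfaces tolerate by design.
 */
static inline void example_report_page_cgroup(struct page *page,
					      struct seq_file *m)
{
	seq_printf(m, "memcg inode: %lu\n",
		   (unsigned long)page_cgroup_ino(page));
}
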
static struct mem_cgroup_per_zone *
mem_cgroup_page_zoneinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return &memcg->nodeinfo[nid]->zoneinfo[zid];
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_node_zone(int nid, int zid)
{
	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_zone *mz,
					 struct mem_cgroup_tree_per_zone *mctz,
					 unsigned long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_zone *mz_node;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess)
			p = &(*p)->rb_left;
		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}
	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
					 struct mem_cgroup_tree_per_zone *mctz)
{
	if (!mz->on_tree)
		return;
	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
				       struct mem_cgroup_tree_per_zone *mctz)
{
	unsigned long flags;

	spin_lock_irqsave(&mctz->lock, flags);
	__mem_cgroup_remove_exceeded(mz, mctz);
	spin_unlock_irqrestore(&mctz->lock, flags);
}

static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
	unsigned long nr_pages = page_counter_read(&memcg->memory);
	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
	unsigned long excess = 0;

	if (nr_pages > soft_limit)
		excess = nr_pages - soft_limit;

	return excess;
}

static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
	unsigned long excess;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;

	mctz = soft_limit_tree_from_page(page);
	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_page_zoneinfo(memcg, page);
		excess = soft_limit_excess(memcg);
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
			spin_unlock_irqrestore(&mctz->lock, flags);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	struct mem_cgroup_tree_per_zone *mctz;
	struct mem_cgroup_per_zone *mz;
	int nid, zid;

	for_each_node(nid) {
		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
			mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
			mctz = soft_limit_tree_node_zone(nid, zid);
			mem_cgroup_remove_exceeded(mz, mctz);
		}
	}
}

static struct mem_cgroup_per_zone *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct rb_node *rightmost = NULL;
	struct mem_cgroup_per_zone *mz;

retry:
	mz = NULL;
	rightmost = rb_last(&mctz->rb_root);
	if (!rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
	/*
	 * Remove the node now but someone else can add it back,
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz, mctz);
	if (!soft_limit_excess(mz->memcg) ||
	    !css_tryget_online(&mz->memcg->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_zone *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct mem_cgroup_per_zone *mz;

	spin_lock_irq(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock_irq(&mctz->lock);
	return mz;
}

/*
 * Return the page count for a single (non-recursive) @memcg.
 *
 * Implementation note: reading percpu statistics for memcg.
 *
 * Both vmstat[] and percpu_counter use thresholds and do periodic
 * synchronization to implement "quick" reads. There is a trade-off between
 * reading cost and precision of the value. We could implement a similar
 * periodic synchronization for memcg's counters.
 *
 * But this _read() function is used for the user interface now. The user
 * accounts memory usage by memory cgroup and _always_ requires an exact
 * value. Even if we provided a quick-and-fuzzy read, we would always have
 * to visit all online cpus and compute the sum. So, for now, unnecessary
 * synchronization is not implemented. (It is just implemented for cpu
 * hotplug.)
 *
 * If there are kernel-internal users which could make do with a not-exact
 * value, and reading all cpu values becomes a performance bottleneck in
 * some common workload, thresholding and synchronization as in vmstat[]
 * should be implemented.
 */
static unsigned long
mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx)
{
	long val = 0;
	int cpu;

	/* Per-cpu values can be negative, use a signed accumulator */
	for_each_possible_cpu(cpu)
		val += per_cpu(memcg->stat->count[idx], cpu);
	/*
	 * Summing races with updates, so val may be negative.  Avoid exposing
	 * transient negative values.
	 */
	if (val < 0)
		val = 0;
	return val;
}

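/*
 * Sketch of a typical consumer of the exact sum above: the "rss"
 * figure in memory.stat is gathered this way (illustrative wrapper
 * only; the real readers loop over all stat indices).
 */
static inline unsigned long example_read_rss(struct mem_cgroup *memcg)
{
	return mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_RSS);
}
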
static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
					    enum mem_cgroup_events_index idx)
{
	unsigned long val = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		val += per_cpu(memcg->stat->events[idx], cpu);
	return val;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 struct page *page,
					 int nr_pages)
{
	/*
	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
	 * counted as CACHE even if it's on ANON LRU.
	 */
	if (PageAnon(page))
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
				nr_pages);
	else
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
				nr_pages);

	if (PageTransHuge(page))
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
				nr_pages);

	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
	else {
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
}

static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
						  int nid,
						  unsigned int lru_mask)
{
	unsigned long nr = 0;
	int zid;

	VM_BUG_ON((unsigned)nid >= nr_node_ids);

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct mem_cgroup_per_zone *mz;
		enum lru_list lru;

		for_each_lru(lru) {
			if (!(BIT(lru) & lru_mask))
				continue;
			mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
			nr += mz->lru_size[lru];
		}
	}
	return nr;
}

static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
			unsigned int lru_mask)
{
	unsigned long nr = 0;
	int nid;

	for_each_node_state(nid, N_MEMORY)
		nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
	return nr;
}

static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->stat->nr_page_events);
	next = __this_cpu_read(memcg->stat->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)next - (long)val < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_NUMAINFO:
			next = val + NUMAINFO_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->stat->targets[target], next);
		return true;
	}
	return false;
}

/*
 * Check events in order.
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit;
		bool do_numainfo __maybe_unused;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
#if MAX_NUMNODES > 1
		do_numainfo = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_NUMAINFO);
#endif
		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
#if MAX_NUMNODES > 1
		if (unlikely(do_numainfo))
			atomic_inc(&memcg->numainfo_events);
#endif
	}
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);

static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg = NULL;

	rcu_read_lock();
	do {
		/*
		 * Page cache insertions can happen without an
		 * actual mm context, e.g. during disk probing
		 * on boot, loopback IO, acct() writes etc.
		 */
		if (unlikely(!mm))
			memcg = root_mem_cgroup;
		else {
			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
			if (unlikely(!memcg))
				memcg = root_mem_cgroup;
		}
	} while (!css_tryget_online(&memcg->css));
	rcu_read_unlock();
	return memcg;
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a zone and a priority level in @reclaim to
 * divide up the memcgs in the hierarchy among all concurrent
 * reclaimers operating on the same zone and priority.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
	struct cgroup_subsys_state *css = NULL;
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *pos = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	if (prev && !reclaim)
		pos = prev;

	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
			goto out;
		return root;
	}

	rcu_read_lock();

	if (reclaim) {
		struct mem_cgroup_per_zone *mz;

		mz = mem_cgroup_zone_zoneinfo(root, reclaim->zone);
		iter = &mz->iter[reclaim->priority];

		if (prev && reclaim->generation != iter->generation)
			goto out_unlock;

		do {
			pos = READ_ONCE(iter->position);
			/*
			 * A racing update may change the position and
			 * put the last reference, hence css_tryget(),
			 * or retry to see the updated position.
			 */
		} while (pos && !css_tryget(&pos->css));
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
		}

		/*
		 * Verify the css and acquire a reference.  The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		memcg = mem_cgroup_from_css(css);

		if (css == &root->css)
			break;

		if (css_tryget(css)) {
			/*
			 * Make sure the memcg is initialized:
			 * mem_cgroup_css_online() orders the
			 * initialization against setting the flag.
			 */
			if (smp_load_acquire(&memcg->initialized))
				break;

			css_put(css);
		}

		memcg = NULL;
	}

	if (reclaim) {
		if (cmpxchg(&iter->position, pos, memcg) == pos) {
			if (memcg)
				css_get(&memcg->css);
			if (pos)
				css_put(&pos->css);
		}

		/*
		 * pairs with css_tryget when dereferencing iter->position
		 * above.
		 */
		if (pos)
			css_put(&pos->css);

		if (!memcg)
			iter->generation++;
		else if (!prev)
			reclaim->generation = iter->generation;
	}

out_unlock:
	rcu_read_unlock();
out:
	if (prev && prev != root)
		css_put(&prev->css);

	return memcg;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

/*
 * Iteration constructs for visiting all cgroups (under a tree).  If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))

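/*
 * Sketch of the shared-walk usage of mem_cgroup_iter() with a
 * reclaim cookie, along the lines of what the reclaim path does
 * (hypothetical reclaim_one() stands in for the per-memcg work):
 */
static inline void example_shared_hierarchy_walk(struct mem_cgroup *root,
						 struct zone *zone, int priority,
						 bool (*reclaim_one)(struct mem_cgroup *))
{
	struct mem_cgroup_reclaim_cookie reclaim = {
		.zone = zone,
		.priority = priority,
	};
	struct mem_cgroup *memcg = NULL;

	while ((memcg = mem_cgroup_iter(root, memcg, &reclaim))) {
		if (reclaim_one(memcg)) {
			/* bailing early: drop the reference still held */
			mem_cgroup_iter_break(root, memcg);
			break;
		}
	}
}
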
/**
 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
 * @zone: zone of the wanted lruvec
 * @memcg: memcg of the wanted lruvec
 *
 * Returns the lru list vector holding pages for the given @zone and
 * @memcg.  This can be the global zone lruvec, if the memory controller
 * is disabled.
 */
struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
				      struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_zone *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &zone->lruvec;
		goto out;
	}

	mz = mem_cgroup_zone_zoneinfo(memcg, zone);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->zone here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->zone != zone))
		lruvec->zone = zone;
	return lruvec;
}

/**
 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
 * @page: the page
 * @zone: zone of the page
 *
 * This function is only safe when following the LRU page isolation
 * and putback protocol: the LRU lock must be held, and the page must
 * either be PageLRU() or the caller must have isolated/allocated it.
 */
struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
{
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &zone->lruvec;
		goto out;
	}

	memcg = page->mem_cgroup;
	/*
	 * Swapcache readahead pages are added to the LRU - and
	 * possibly migrated - before they are charged.
	 */
	if (!memcg)
		memcg = root_mem_cgroup;

	mz = mem_cgroup_page_zoneinfo(memcg, page);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->zone here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->zone != zone))
		lruvec->zone = zone;
	return lruvec;
}

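/*
 * Sketch of the isolation protocol named above, in the shape the
 * LRU walkers use it (illustrative only; error handling elided):
 */
static inline void example_lru_isolation(struct page *page, struct zone *zone)
{
	struct lruvec *lruvec;

	spin_lock_irq(&zone->lru_lock);
	lruvec = mem_cgroup_page_lruvec(page, zone);	/* page is PageLRU here */
	/* ... isolate from or put back onto lruvec's lists ... */
	spin_unlock_irq(&zone->lru_lock);
}
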
/**
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @nr_pages: positive when adding or negative when removing
 *
 * This function must be called when a page is added to or removed from an
 * lru list.
 */
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int nr_pages)
{
	struct mem_cgroup_per_zone *mz;
	unsigned long *lru_size;

	if (mem_cgroup_disabled())
		return;

	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
	lru_size = mz->lru_size + lru;
	*lru_size += nr_pages;
	VM_BUG_ON((long)(*lru_size) < 0);
}

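/*
 * Sketch of the accounting contract above, mirroring what
 * add_page_to_lru_list() in mm_inline.h does (illustrative only):
 */
static inline void example_add_to_lru(struct page *page, struct lruvec *lruvec,
				      enum lru_list lru)
{
	int nr_pages = hpage_nr_pages(page);

	mem_cgroup_update_lru_size(lruvec, lru, nr_pages);	/* account first */
	list_add(&page->lru, &lruvec->lists[lru]);		/* then link */
	/* removal would do the mirror image with -nr_pages */
}
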
bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	struct task_struct *p;
	bool ret;

	p = find_lock_task_mm(task);
	if (p) {
		task_memcg = get_mem_cgroup_from_mm(p->mm);
		task_unlock(p);
	} else {
		/*
		 * All threads may have already detached their mm's, but the oom
		 * killer still needs to detect if they have already been oom
		 * killed to prevent needlessly killing additional tasks.
		 */
		rcu_read_lock();
		task_memcg = mem_cgroup_from_task(task);
		css_get(&task_memcg->css);
		rcu_read_unlock();
	}
	ret = mem_cgroup_is_descendant(task_memcg, memcg);
	css_put(&task_memcg->css);
	return ret;
}

#define mem_cgroup_from_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @memcg can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
	unsigned long margin = 0;
	unsigned long count;
	unsigned long limit;

	count = page_counter_read(&memcg->memory);
	limit = READ_ONCE(memcg->memory.limit);
	if (count < limit)
		margin = limit - count;

	if (do_swap_account) {
		count = page_counter_read(&memcg->memsw);
		limit = READ_ONCE(memcg->memsw.limit);
		if (count <= limit)
			margin = min(margin, limit - count);
	}

	return margin;
}

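/*
 * Sketch of a margin-based fast check (hypothetical helper): a
 * caller wanting to know whether a batch of @nr_pages could be
 * charged without hitting either limit can consult the margin.
 */
static inline bool example_can_charge_without_reclaim(struct mem_cgroup *memcg,
						      unsigned int nr_pages)
{
	return mem_cgroup_margin(memcg) >= nr_pages;
}
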
/*
 * A routine for checking whether "mem" is under move_account() or not.
 *
 * Checking whether a cgroup is mc.from, mc.to, or under the hierarchy of
 * a moving cgroup. This is for waiting at high memory pressure
 * caused by "move".
 */
static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	bool ret = false;
	/*
	 * Unlike task_move routines, we access mc.to, mc.from not under
	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;

	ret = mem_cgroup_is_descendant(from, memcg) ||
		mem_cgroup_is_descendant(to, memcg);
unlock:
	spin_unlock(&mc.lock);
	return ret;
}

static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
{
	if (mc.moving_task && current != mc.moving_task) {
		if (mem_cgroup_under_move(memcg)) {
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}

#define K(x) ((x) << (PAGE_SHIFT-10))
/**
 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
	/* oom_info_lock ensures that parallel ooms do not interleave */
	static DEFINE_MUTEX(oom_info_lock);
	struct mem_cgroup *iter;
	unsigned int i;

	mutex_lock(&oom_info_lock);
	rcu_read_lock();

	if (p) {
		pr_info("Task in ");
		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
		pr_cont(" killed as a result of limit of ");
	} else {
		pr_info("Memory limit reached of cgroup ");
	}

	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont("\n");

	rcu_read_unlock();

	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memory)),
		K((u64)memcg->memory.limit), memcg->memory.failcnt);
	pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memsw)),
		K((u64)memcg->memsw.limit), memcg->memsw.failcnt);
	pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->kmem)),
		K((u64)memcg->kmem.limit), memcg->kmem.failcnt);

	for_each_mem_cgroup_tree(iter, memcg) {
		pr_info("Memory cgroup stats for ");
		pr_cont_cgroup_path(iter->css.cgroup);
		pr_cont(":");

		for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
			if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
				continue;
			pr_cont(" %s:%luKB", mem_cgroup_stat_names[i],
				K(mem_cgroup_read_stat(iter, i)));
		}

		for (i = 0; i < NR_LRU_LISTS; i++)
			pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
				K(mem_cgroup_nr_lru_pages(iter, BIT(i))));

		pr_cont("\n");
	}
	mutex_unlock(&oom_info_lock);
}

/*
 * This function returns the number of memcg under hierarchy tree. Returns
 * 1 (self count) if no children.
 */
static int mem_cgroup_count_children(struct mem_cgroup *memcg)
{
	int num = 0;
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg)
		num++;
	return num;
}

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
	unsigned long limit;

	limit = memcg->memory.limit;
	if (mem_cgroup_swappiness(memcg)) {
		unsigned long memsw_limit;

		memsw_limit = memcg->memsw.limit;
		limit = min(limit + total_swap_pages, memsw_limit);
	}
	return limit;
}

static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.gfp_mask = gfp_mask,
		.order = order,
	};
	struct mem_cgroup *iter;
	unsigned long chosen_points = 0;
	unsigned long totalpages;
	unsigned int points = 0;
	struct task_struct *chosen = NULL;

	mutex_lock(&oom_lock);

	/*
	 * If current has a pending SIGKILL or is exiting, then automatically
	 * select it.  The goal is to allow it to allocate so that it may
	 * quickly exit and free its memory.
	 */
	if (fatal_signal_pending(current) || task_will_free_mem(current)) {
		mark_oom_victim(current);
		goto unlock;
	}

	check_panic_on_oom(&oc, CONSTRAINT_MEMCG, memcg);
	totalpages = mem_cgroup_get_limit(memcg) ? : 1;
	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, &it);
		while ((task = css_task_iter_next(&it))) {
			switch (oom_scan_process_thread(&oc, task, totalpages)) {
			case OOM_SCAN_SELECT:
				if (chosen)
					put_task_struct(chosen);
				chosen = task;
				chosen_points = ULONG_MAX;
				get_task_struct(chosen);
				/* fall through */
			case OOM_SCAN_CONTINUE:
				continue;
			case OOM_SCAN_ABORT:
				css_task_iter_end(&it);
				mem_cgroup_iter_break(memcg, iter);
				if (chosen)
					put_task_struct(chosen);
				goto unlock;
			case OOM_SCAN_OK:
				break;
			};
			points = oom_badness(task, memcg, NULL, totalpages);
			if (!points || points < chosen_points)
				continue;
			/* Prefer thread group leaders for display purposes */
			if (points == chosen_points &&
			    thread_group_leader(chosen))
				continue;

			if (chosen)
				put_task_struct(chosen);
			chosen = task;
			chosen_points = points;
			get_task_struct(chosen);
		}
		css_task_iter_end(&it);
	}

	if (chosen) {
		points = chosen_points * 1000 / totalpages;
		oom_kill_process(&oc, chosen, points, totalpages, memcg,
				 "Memory cgroup out of memory");
	}
unlock:
	mutex_unlock(&oom_lock);
}

#if MAX_NUMNODES > 1

/**
 * test_mem_cgroup_node_reclaimable
 * @memcg: the target memcg
 * @nid: the node ID to be checked.
 * @noswap : specify true here if the user wants file only information.
 *
 * This function returns whether the specified memcg contains any
 * reclaimable pages on a node. Returns true if there are any reclaimable
 * pages in the node.
 */
static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
		int nid, bool noswap)
{
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
		return true;
	if (noswap || !total_swap_pages)
		return false;
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
		return true;
	return false;

}

/*
 * Always updating the nodemask is not very good - even if we have an empty
 * list or the wrong list here, we can start from some node and traverse all
 * nodes based on the zonelist. So update the list loosely once per 10 secs.
 *
 */
static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
{
	int nid;
	/*
	 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
	 * pagein/pageout changes since the last update.
	 */
	if (!atomic_read(&memcg->numainfo_events))
		return;
	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
		return;

	/* make a nodemask where this memcg uses memory from */
	memcg->scan_nodes = node_states[N_MEMORY];

	for_each_node_mask(nid, node_states[N_MEMORY]) {

		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
			node_clear(nid, memcg->scan_nodes);
	}

	atomic_set(&memcg->numainfo_events, 0);
	atomic_set(&memcg->numainfo_updating, 0);
}

/*
 * Selecting a node where we start reclaim from. Because what we need is just
 * reducing the usage counter, starting from anywhere is OK. Considering
 * memory reclaim from the current node, there are pros. and cons.
 *
 * Freeing memory from the current node means freeing memory from a node which
 * we'll use or we've used. So, it may make LRU bad. And if several threads
 * hit limits, it will see contention on a node. But freeing from a remote
 * node means more costs for memory reclaim because of memory latency.
 *
 * Now, we use round-robin. A better algorithm is welcomed.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	int node;

	mem_cgroup_may_update_nodemask(memcg);
	node = memcg->last_scanned_node;

	node = next_node(node, memcg->scan_nodes);
	if (node == MAX_NUMNODES)
		node = first_node(memcg->scan_nodes);
	/*
	 * We call this when we hit limit, not when pages are added to LRU.
	 * No LRU may hold pages because all pages are UNEVICTABLE or
	 * memcg is too small and all pages are not on LRU. In that case,
	 * we use the current node.
	 */
	if (unlikely(node == MAX_NUMNODES))
		node = numa_node_id();

	memcg->last_scanned_node = node;
	return node;
}
#else
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	return 0;
}
#endif

static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
				   struct zone *zone,
				   gfp_t gfp_mask,
				   unsigned long *total_scanned)
{
	struct mem_cgroup *victim = NULL;
	int total = 0;
	int loop = 0;
	unsigned long excess;
	unsigned long nr_scanned;
	struct mem_cgroup_reclaim_cookie reclaim = {
		.zone = zone,
		.priority = 0,
	};

	excess = soft_limit_excess(root_memcg);

	while (1) {
		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
		if (!victim) {
			loop++;
			if (loop >= 2) {
				/*
				 * If we have not been able to reclaim
				 * anything, it may be because there are
				 * no reclaimable pages under this hierarchy
				 */
				if (!total)
					break;
				/*
				 * We want to do more targeted reclaim.
				 * excess >> 2 is not too excessive, so we
				 * don't reclaim too much, nor too little
				 * that we keep coming back to reclaim from
				 * this cgroup
				 */
				if (total >= (excess >> 2) ||
					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
					break;
			}
			continue;
		}
		total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
						     zone, &nr_scanned);
		*total_scanned += nr_scanned;
		if (!soft_limit_excess(root_memcg))
			break;
	}
	mem_cgroup_iter_break(root_memcg, victim);
	return total;
}

#ifdef CONFIG_LOCKDEP
static struct lockdep_map memcg_oom_lock_dep_map = {
	.name = "memcg_oom_lock",
};
#endif

static DEFINE_SPINLOCK(memcg_oom_lock);

/*
 * Check whether the OOM-Killer is already running under our hierarchy.
 * If someone is running, return false.
 */
static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter, *failed = NULL;

	spin_lock(&memcg_oom_lock);

	for_each_mem_cgroup_tree(iter, memcg) {
		if (iter->oom_lock) {
			/*
			 * this subtree of our hierarchy is already locked
			 * so we cannot give a lock.
			 */
			failed = iter;
			mem_cgroup_iter_break(memcg, iter);
			break;
		} else
			iter->oom_lock = true;
	}

	if (failed) {
		/*
		 * OK, we failed to lock the whole subtree so we have
		 * to clean up what we set up to the failing subtree
		 */
		for_each_mem_cgroup_tree(iter, memcg) {
			if (iter == failed) {
				mem_cgroup_iter_break(memcg, iter);
				break;
			}
			iter->oom_lock = false;
		}
	} else
		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);

	spin_unlock(&memcg_oom_lock);

	return !failed;
}

static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->oom_lock = false;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->under_oom++;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	/*
	 * When a new child is created while the hierarchy is under oom,
	 * mem_cgroup_oom_lock() may not be called. Watch for underflow.
	 */
	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		if (iter->under_oom > 0)
			iter->under_oom--;
	spin_unlock(&memcg_oom_lock);
}

static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);

struct oom_wait_info {
	struct mem_cgroup *memcg;
	wait_queue_t	wait;
};

static int memcg_oom_wake_function(wait_queue_t *wait,
	unsigned mode, int sync, void *arg)
{
	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
	struct mem_cgroup *oom_wait_memcg;
	struct oom_wait_info *oom_wait_info;

	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
	oom_wait_memcg = oom_wait_info->memcg;

	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
		return 0;
	return autoremove_wake_function(wait, mode, sync, arg);
}

static void memcg_oom_recover(struct mem_cgroup *memcg)
{
	/*
	 * For the following lockless ->under_oom test, the only required
	 * guarantee is that it must see the state asserted by an OOM when
	 * this function is called as a result of userland actions
	 * triggered by the notification of the OOM.  This is trivially
	 * achieved by invoking mem_cgroup_mark_under_oom() before
	 * triggering notification.
	 */
	if (memcg && memcg->under_oom)
		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
}

static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
{
	if (!current->memcg_may_oom)
		return;
	/*
	 * We are in the middle of the charge context here, so we
	 * don't want to block when potentially sitting on a callstack
	 * that holds all kinds of filesystem and mm locks.
	 *
	 * Also, the caller may handle a failed allocation gracefully
	 * (like optional page cache readahead) and so an OOM killer
	 * invocation might not even be necessary.
	 *
	 * That's why we don't do anything here except remember the
	 * OOM context and then deal with it at the end of the page
	 * fault when the stack is unwound, the locks are released,
	 * and when we know whether the fault was overall successful.
	 */
	css_get(&memcg->css);
	current->memcg_in_oom = memcg;
	current->memcg_oom_gfp_mask = mask;
	current->memcg_oom_order = order;
}

/**
 * mem_cgroup_oom_synchronize - complete memcg OOM handling
 * @handle: actually kill/wait or just clean up the OOM state
 *
 * This has to be called at the end of a page fault if the memcg OOM
 * handler was enabled.
 *
 * Memcg supports userspace OOM handling where failed allocations must
 * sleep on a waitqueue until the userspace task resolves the
 * situation.  Sleeping directly in the charge context with all kinds
 * of locks held is not a good idea, instead we remember an OOM state
 * in the task and mem_cgroup_oom_synchronize() has to be called at
 * the end of the page fault to complete the OOM handling.
 *
 * Returns %true if an ongoing memcg OOM situation was detected and
 * completed, %false otherwise.
 */
bool mem_cgroup_oom_synchronize(bool handle)
{
	struct mem_cgroup *memcg = current->memcg_in_oom;
	struct oom_wait_info owait;
	bool locked;

	/* OOM is global, do not handle */
	if (!memcg)
		return false;

	if (!handle || oom_killer_disabled)
		goto cleanup;

	owait.memcg = memcg;
	owait.wait.flags = 0;
	owait.wait.func = memcg_oom_wake_function;
	owait.wait.private = current;
	INIT_LIST_HEAD(&owait.wait.task_list);

	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
	mem_cgroup_mark_under_oom(memcg);

	locked = mem_cgroup_oom_trylock(memcg);

	if (locked)
		mem_cgroup_oom_notify(memcg);

	if (locked && !memcg->oom_kill_disable) {
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
					 current->memcg_oom_order);
	} else {
		schedule();
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
	}

	if (locked) {
		mem_cgroup_oom_unlock(memcg);
		/*
		 * There is no guarantee that an OOM-lock contender
		 * sees the wakeups triggered by the OOM kill
		 * uncharges.  Wake any sleepers explicitly.
		 */
		memcg_oom_recover(memcg);
	}
cleanup:
	current->memcg_in_oom = NULL;
	css_put(&memcg->css);
	return true;
}

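/*
 * Sketch of the call-site contract described above (illustrative;
 * cf. what the page fault epilogue does once the stack is unwound
 * and all locks are dropped):
 */
static inline void example_fault_epilogue(unsigned int fault_flags)
{
	if (fault_flags & VM_FAULT_OOM) {
		/* kill/wait if a memcg OOM was recorded, else it was global */
		if (mem_cgroup_oom_synchronize(true))
			return;
		/* ... global OOM handling would follow here ... */
	}
}
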
/**
 * mem_cgroup_begin_page_stat - begin a page state statistics transaction
 * @page: page that is going to change accounted state
 *
 * This function must mark the beginning of an accounted page state
 * change to prevent double accounting when the page is concurrently
 * being moved to another memcg:
 *
 *   memcg = mem_cgroup_begin_page_stat(page);
 *   if (TestClearPageState(page))
 *     mem_cgroup_update_page_stat(memcg, state, -1);
 *   mem_cgroup_end_page_stat(memcg);
 */
struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long flags;

	/*
	 * The RCU lock is held throughout the transaction.  The fast
	 * path can get away without acquiring the memcg->move_lock
	 * because page moving starts with an RCU grace period.
	 *
	 * The RCU lock also protects the memcg from being freed when
	 * the page state that is going to change is the only thing
	 * preventing the page from being uncharged.
	 * E.g. end-writeback clearing PageWriteback(), which allows
	 * migration to go ahead and uncharge the page before the
	 * account transaction might be complete.
	 */
	rcu_read_lock();

	if (mem_cgroup_disabled())
		return NULL;
again:
	memcg = page->mem_cgroup;
	if (unlikely(!memcg))
		return NULL;

	if (atomic_read(&memcg->moving_account) <= 0)
		return memcg;

	spin_lock_irqsave(&memcg->move_lock, flags);
	if (memcg != page->mem_cgroup) {
		spin_unlock_irqrestore(&memcg->move_lock, flags);
		goto again;
	}

	/*
	 * When charge migration first begins, we can have locked and
	 * unlocked page stat updates happening concurrently.  Track
	 * the task who has the lock for mem_cgroup_end_page_stat().
	 */
	memcg->move_lock_task = current;
	memcg->move_lock_flags = flags;

	return memcg;
}
EXPORT_SYMBOL(mem_cgroup_begin_page_stat);

/**
 * mem_cgroup_end_page_stat - finish a page state statistics transaction
 * @memcg: the memcg that was accounted against
 */
void mem_cgroup_end_page_stat(struct mem_cgroup *memcg)
{
	if (memcg && memcg->move_lock_task == current) {
		unsigned long flags = memcg->move_lock_flags;

		memcg->move_lock_task = NULL;
		memcg->move_lock_flags = 0;

		spin_unlock_irqrestore(&memcg->move_lock, flags);
	}

	rcu_read_unlock();
}
EXPORT_SYMBOL(mem_cgroup_end_page_stat);
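
/*
 * Illustrative sketch (not part of this file): a concrete instance of
 * the transaction documented above, modeled on the dirty-page
 * accounting; the real call sites live in other files and may differ
 * in detail:
 *
 *	struct mem_cgroup *memcg;
 *
 *	memcg = mem_cgroup_begin_page_stat(page);
 *	if (TestClearPageDirty(page))
 *		mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_DIRTY);
 *	mem_cgroup_end_page_stat(memcg);
 */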

/*
 * size of first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: larger machines may need a bigger batch size.
 */
#define CHARGE_BATCH	32U
struct memcg_stock_pcp {
	struct mem_cgroup *cached; /* never the root cgroup */
	unsigned int nr_pages;
	struct work_struct work;
	unsigned long flags;
#define FLUSHING_CACHED_CHARGE	0
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
static DEFINE_MUTEX(percpu_charge_mutex);

/**
 * consume_stock: Try to consume stocked charge on this cpu.
 * @memcg: memcg to consume from.
 * @nr_pages: how many pages to charge.
 *
 * The charges will only happen if @memcg matches the current cpu's memcg
 * stock, and at least @nr_pages are available in that stock.  Failure to
 * service an allocation will refill the stock.
 *
 * Returns %true if successful, %false otherwise.
 */
static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	bool ret = false;

	if (nr_pages > CHARGE_BATCH)
		return ret;

	stock = &get_cpu_var(memcg_stock);
	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
		stock->nr_pages -= nr_pages;
		ret = true;
	}
	put_cpu_var(memcg_stock);
	return ret;
}

/*
 * Return the charges cached in the percpu stock to the counters and
 * reset the cached information.
 */
static void drain_stock(struct memcg_stock_pcp *stock)
{
	struct mem_cgroup *old = stock->cached;

	if (stock->nr_pages) {
		page_counter_uncharge(&old->memory, stock->nr_pages);
		if (do_swap_account)
			page_counter_uncharge(&old->memsw, stock->nr_pages);
		css_put_many(&old->css, stock->nr_pages);
		stock->nr_pages = 0;
	}
	stock->cached = NULL;
}

/*
 * This must be called with preemption disabled, or from a thread that
 * is pinned to the local cpu.
 */
static void drain_local_stock(struct work_struct *dummy)
{
	struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock);
	drain_stock(stock);
	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
}

/*
 * Cache @nr_pages charges in the local per-cpu stock, to be consumed
 * by consume_stock() later.
 */
static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);

	if (stock->cached != memcg) { /* reset if necessary */
		drain_stock(stock);
		stock->cached = memcg;
	}
	stock->nr_pages += nr_pages;
	put_cpu_var(memcg_stock);
}
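
/*
 * Worked example (illustrative): with CHARGE_BATCH == 32, a
 * single-page charge in try_charge() below charges a batch of 32
 * pages to the page counters and calls refill_stock(memcg, 31); the
 * next 31 single-page charges on this cpu are then served from the
 * stock by consume_stock() without touching the shared counters.
 */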

/*
 * Drain all per-cpu charge caches for the given root_memcg and the
 * subtree of the hierarchy under it.
 */
static void drain_all_stock(struct mem_cgroup *root_memcg)
{
	int cpu, curcpu;

	/* If someone's already draining, avoid running more workers. */
	if (!mutex_trylock(&percpu_charge_mutex))
		return;
	/* Notify other cpus that system-wide "drain" is running */
	get_online_cpus();
	curcpu = get_cpu();
	for_each_online_cpu(cpu) {
		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
		struct mem_cgroup *memcg;

		memcg = stock->cached;
		if (!memcg || !stock->nr_pages)
			continue;
		if (!mem_cgroup_is_descendant(memcg, root_memcg))
			continue;
		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
			if (cpu == curcpu)
				drain_local_stock(&stock->work);
			else
				schedule_work_on(cpu, &stock->work);
		}
	}
	put_cpu();
	put_online_cpus();
	mutex_unlock(&percpu_charge_mutex);
}

static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
					unsigned long action,
					void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	struct memcg_stock_pcp *stock;

	if (action == CPU_ONLINE)
		return NOTIFY_OK;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	stock = &per_cpu(memcg_stock, cpu);
	drain_stock(stock);
	return NOTIFY_OK;
}

/*
 * Scheduled by try_charge() to be executed from the userland return path
 * and reclaims memory over the high limit.
 */
void mem_cgroup_handle_over_high(void)
{
	unsigned int nr_pages = current->memcg_nr_pages_over_high;
	struct mem_cgroup *memcg, *pos;

	if (likely(!nr_pages))
		return;

	pos = memcg = get_mem_cgroup_from_mm(current->mm);

	do {
		if (page_counter_read(&pos->memory) <= pos->high)
			continue;
		mem_cgroup_events(pos, MEMCG_HIGH, 1);
		try_to_free_mem_cgroup_pages(pos, nr_pages, GFP_KERNEL, true);
	} while ((pos = parent_mem_cgroup(pos)));

	css_put(&memcg->css);
	current->memcg_nr_pages_over_high = 0;
}
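
/*
 * Note (illustrative; the hook itself lives in tracehook.h):
 * try_charge() below records the excess in
 * current->memcg_nr_pages_over_high and calls
 * set_notify_resume(current); tracehook_notify_resume() then invokes
 * mem_cgroup_handle_over_high() right before the task reenters
 * userspace, which is where the reclaim above actually runs.
 */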

static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
		      unsigned int nr_pages)
{
	unsigned int batch = max(CHARGE_BATCH, nr_pages);
	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct mem_cgroup *mem_over_limit;
	struct page_counter *counter;
	unsigned long nr_reclaimed;
	bool may_swap = true;
	bool drained = false;
	int ret = 0;

	if (mem_cgroup_is_root(memcg))
		goto done;
retry:
	if (consume_stock(memcg, nr_pages))
		goto done;

	if (!do_swap_account ||
	    !page_counter_try_charge(&memcg->memsw, batch, &counter)) {
		if (!page_counter_try_charge(&memcg->memory, batch, &counter))
			goto done_restock;
		if (do_swap_account)
			page_counter_uncharge(&memcg->memsw, batch);
		mem_over_limit = mem_cgroup_from_counter(counter, memory);
	} else {
		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
		may_swap = false;
	}

	if (batch > nr_pages) {
		batch = nr_pages;
		goto retry;
	}

	/*
	 * Unlike in global OOM situations, memcg is not in a physical
	 * memory shortage.  Allow dying and OOM-killed tasks to
	 * bypass the last charges so that they can exit quickly and
	 * free their memory.
	 */
	if (unlikely(test_thread_flag(TIF_MEMDIE) ||
		     fatal_signal_pending(current) ||
		     current->flags & PF_EXITING))
		goto bypass;

	if (unlikely(task_in_memcg_oom(current)))
		goto nomem;

	if (!(gfp_mask & __GFP_WAIT))
		goto nomem;

	mem_cgroup_events(mem_over_limit, MEMCG_MAX, 1);

	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
						    gfp_mask, may_swap);

	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
		goto retry;

	if (!drained) {
		drain_all_stock(mem_over_limit);
		drained = true;
		goto retry;
	}

	if (gfp_mask & __GFP_NORETRY)
		goto nomem;
	/*
	 * Even though the limit is exceeded at this point, reclaim
	 * may have been able to free some pages.  Retry the charge
	 * before killing the task.
	 *
	 * Only for regular pages, though: huge pages are rather
	 * unlikely to succeed so close to the limit, and we fall back
	 * to regular pages anyway in case of failure.
	 */
	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
		goto retry;
	/*
	 * At task move, charge accounts can be doubly counted. So, it's
	 * better to wait until the end of task_move if something is going on.
	 */
	if (mem_cgroup_wait_acct_move(mem_over_limit))
		goto retry;

	if (nr_retries--)
		goto retry;

	if (gfp_mask & __GFP_NOFAIL)
		goto bypass;

	if (fatal_signal_pending(current))
		goto bypass;

	mem_cgroup_events(mem_over_limit, MEMCG_OOM, 1);

	mem_cgroup_oom(mem_over_limit, gfp_mask, get_order(nr_pages));
nomem:
	if (!(gfp_mask & __GFP_NOFAIL))
		return -ENOMEM;
bypass:
	return -EINTR;

done_restock:
	css_get_many(&memcg->css, batch);
	if (batch > nr_pages)
		refill_stock(memcg, batch - nr_pages);

	/*
	 * If the hierarchy is above the normal consumption range, schedule
	 * reclaim on returning to userland.  We can perform reclaim here
	 * if __GFP_WAIT but let's always punt for simplicity and so that
	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
	 * not recorded as it most likely matches current's and won't
	 * change in the meantime.  As high limit is checked again before
	 * reclaim, the cost of mismatch is negligible.
	 */
	do {
		if (page_counter_read(&memcg->memory) > memcg->high) {
			current->memcg_nr_pages_over_high += nr_pages;
			set_notify_resume(current);
			break;
		}
	} while ((memcg = parent_mem_cgroup(memcg)));
done:
	return ret;
}

static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	if (mem_cgroup_is_root(memcg))
		return;

	page_counter_uncharge(&memcg->memory, nr_pages);
	if (do_swap_account)
		page_counter_uncharge(&memcg->memsw, nr_pages);

	css_put_many(&memcg->css, nr_pages);
}
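
/*
 * Illustrative sketch (not part of this file): try_charge(),
 * commit_charge() and cancel_charge() back the public three-step
 * charge API; a caller such as the anon fault path does roughly:
 *
 *	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg))
 *		return VM_FAULT_OOM;
 *	...map the page...
 *	if (mapping_failed)
 *		mem_cgroup_cancel_charge(page, memcg);
 *	else
 *		mem_cgroup_commit_charge(page, memcg, false);
 */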

static void lock_page_lru(struct page *page, int *isolated)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	if (PageLRU(page)) {
		struct lruvec *lruvec;

		lruvec = mem_cgroup_page_lruvec(page, zone);
		ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_lru(page));
		*isolated = 1;
	} else
		*isolated = 0;
}

static void unlock_page_lru(struct page *page, int isolated)
{
	struct zone *zone = page_zone(page);

	if (isolated) {
		struct lruvec *lruvec;

		lruvec = mem_cgroup_page_lruvec(page, zone);
		VM_BUG_ON_PAGE(PageLRU(page), page);
		SetPageLRU(page);
		add_page_to_lru_list(page, lruvec, page_lru(page));
	}
	spin_unlock_irq(&zone->lru_lock);
}

static void commit_charge(struct page *page, struct mem_cgroup *memcg,
			  bool lrucare)
{
	int isolated;

	VM_BUG_ON_PAGE(page->mem_cgroup, page);

	/*
	 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
	 * may already be on some other mem_cgroup's LRU.  Take care of it.
	 */
	if (lrucare)
		lock_page_lru(page, &isolated);

	/*
	 * Nobody should be changing or seriously looking at
	 * page->mem_cgroup at this point:
	 *
	 * - the page is uncharged
	 *
	 * - the page is off-LRU
	 *
	 * - an anonymous fault has exclusive page access, except for
	 *   a locked page table
	 *
	 * - a page cache insertion, a swapin fault, or a migration
	 *   have the page locked
	 */
	page->mem_cgroup = memcg;

	if (lrucare)
		unlock_page_lru(page, isolated);
}

#ifdef CONFIG_MEMCG_KMEM
int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp,
		      unsigned long nr_pages)
{
	struct page_counter *counter;
	int ret = 0;

	ret = page_counter_try_charge(&memcg->kmem, nr_pages, &counter);
	if (ret < 0)
		return ret;

	ret = try_charge(memcg, gfp, nr_pages);
	if (ret == -EINTR)  {
		/*
		 * try_charge() chose to bypass to root due to OOM kill or
		 * fatal signal.  Since our only options are to either fail
		 * the allocation or charge it to this cgroup, do it as a
		 * temporary condition. But we can't fail. From a kmem/slab
		 * perspective, the cache has already been selected, by
		 * mem_cgroup_kmem_get_cache(), so it is too late to change
		 * our minds.
		 *
		 * This condition will only trigger if the task entered
		 * memcg_charge_kmem in a sane state, but was OOM-killed
		 * during try_charge() above. Tasks that were already dying
		 * when the allocation triggers should have been already
		 * directed to the root cgroup in memcontrol.h
		 */
		page_counter_charge(&memcg->memory, nr_pages);
		if (do_swap_account)
			page_counter_charge(&memcg->memsw, nr_pages);
		css_get_many(&memcg->css, nr_pages);
		ret = 0;
	} else if (ret)
		page_counter_uncharge(&memcg->kmem, nr_pages);

	return ret;
}

void memcg_uncharge_kmem(struct mem_cgroup *memcg, unsigned long nr_pages)
{
	page_counter_uncharge(&memcg->memory, nr_pages);
	if (do_swap_account)
		page_counter_uncharge(&memcg->memsw, nr_pages);

	page_counter_uncharge(&memcg->kmem, nr_pages);

	css_put_many(&memcg->css, nr_pages);
}

static int memcg_alloc_cache_id(void)
{
	int id, size;
	int err;

	id = ida_simple_get(&memcg_cache_ida,
			    0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
	if (id < 0)
		return id;

	if (id < memcg_nr_cache_ids)
		return id;

	/*
	 * There's no space for the new id in memcg_caches arrays,
	 * so we have to grow them.
	 */
	down_write(&memcg_cache_ids_sem);

	size = 2 * (id + 1);
	if (size < MEMCG_CACHES_MIN_SIZE)
		size = MEMCG_CACHES_MIN_SIZE;
	else if (size > MEMCG_CACHES_MAX_SIZE)
		size = MEMCG_CACHES_MAX_SIZE;

	err = memcg_update_all_caches(size);
	if (!err)
		err = memcg_update_all_list_lrus(size);
	if (!err)
		memcg_nr_cache_ids = size;

	up_write(&memcg_cache_ids_sem);

	if (err) {
		ida_simple_remove(&memcg_cache_ida, id);
		return err;
	}
	return id;
}

static void memcg_free_cache_id(int id)
{
	ida_simple_remove(&memcg_cache_ida, id);
}

struct memcg_kmem_cache_create_work {
	struct mem_cgroup *memcg;
	struct kmem_cache *cachep;
	struct work_struct work;
};

static void memcg_kmem_cache_create_func(struct work_struct *w)
{
	struct memcg_kmem_cache_create_work *cw =
		container_of(w, struct memcg_kmem_cache_create_work, work);
	struct mem_cgroup *memcg = cw->memcg;
	struct kmem_cache *cachep = cw->cachep;

	memcg_create_kmem_cache(memcg, cachep);

	css_put(&memcg->css);
	kfree(cw);
}

/*
 * Enqueue the creation of a per-memcg kmem_cache.
 */
static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
					       struct kmem_cache *cachep)
{
	struct memcg_kmem_cache_create_work *cw;

	cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
	if (!cw)
		return;

	css_get(&memcg->css);

	cw->memcg = memcg;
	cw->cachep = cachep;
	INIT_WORK(&cw->work, memcg_kmem_cache_create_func);

	schedule_work(&cw->work);
}

static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
					     struct kmem_cache *cachep)
{
	/*
	 * We need to stop accounting when we kmalloc, because if the
	 * corresponding kmalloc cache is not yet created, the first allocation
	 * in __memcg_schedule_kmem_cache_create will recurse.
	 *
	 * However, it is better to enclose the whole function. Depending on
	 * the debugging options enabled, INIT_WORK(), for instance, can
	 * trigger an allocation. This too, will make us recurse. Because at
	 * this point we can't allow ourselves back into memcg_kmem_get_cache,
	 * the safest choice is to do it like this, wrapping the whole function.
	 */
	current->memcg_kmem_skip_account = 1;
	__memcg_schedule_kmem_cache_create(memcg, cachep);
	current->memcg_kmem_skip_account = 0;
}

/*
 * Return the kmem_cache we're supposed to use for a slab allocation.
 * We try to use the current memcg's version of the cache.
 *
 * If the cache does not exist yet, if we are the first user of it,
 * we either create it immediately, if possible, or create it asynchronously
 * in a workqueue.
 * In the latter case, we will let the current allocation go through with
 * the original cache.
 *
 * Can't be called in interrupt context or from kernel threads.
 * This function needs to be called with rcu_read_lock() held.
 */
struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
{
	struct mem_cgroup *memcg;
	struct kmem_cache *memcg_cachep;
	int kmemcg_id;

	VM_BUG_ON(!is_root_cache(cachep));

	if (current->memcg_kmem_skip_account)
		return cachep;

	memcg = get_mem_cgroup_from_mm(current->mm);
	kmemcg_id = READ_ONCE(memcg->kmemcg_id);
	if (kmemcg_id < 0)
		goto out;

	memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
	if (likely(memcg_cachep))
		return memcg_cachep;

	/*
	 * If we are in a safe context (can wait, and not in interrupt
	 * context), we could be predictable and return right away.
	 * This would guarantee that the allocation being performed
	 * already belongs in the new cache.
	 *
	 * However, there are some clashes that can arise from locking.
	 * For instance, because we acquire the slab_mutex while doing
	 * memcg_create_kmem_cache, this means no further allocation
	 * could happen with the slab_mutex held. So it's better to
	 * defer everything.
	 */
	memcg_schedule_kmem_cache_create(memcg, cachep);
out:
	css_put(&memcg->css);
	return cachep;
}
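
/*
 * Illustrative sketch (not part of this file): the slab hot path
 * wraps the function above roughly the way the memcg_kmem_get_cache()
 * helper in memcontrol.h does, falling back to the root cache
 * whenever per-memcg accounting cannot apply:
 *
 *	if (!memcg_kmem_enabled())
 *		return cachep;
 *	if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
 *		return cachep;
 *	return __memcg_kmem_get_cache(cachep);
 */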

void __memcg_kmem_put_cache(struct kmem_cache *cachep)
{
	if (!is_root_cache(cachep))
		css_put(&cachep->memcg_params.memcg->css);
}

/*
 * We need to verify if the allocation against current->mm->owner's memcg is
 * possible for the given order. But the page is not allocated yet, so we'll
 * need a further commit step to do the final arrangements.
 *
 * It is possible for the task to switch cgroups in the meantime, so at
 * commit time, we can't rely on task conversion any longer.  We'll then use
 * the handle argument to return to the caller which cgroup we should commit
 * against. We could also return the memcg directly and avoid the pointer
 * passing, but a boolean return value gives better semantics considering
 * the compiled-out case as well.
 *
 * Returning true means the allocation is possible.
 */
bool
__memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order)
{
	struct mem_cgroup *memcg;
	int ret;

	*_memcg = NULL;

	memcg = get_mem_cgroup_from_mm(current->mm);

	if (!memcg_kmem_is_active(memcg)) {
		css_put(&memcg->css);
		return true;
	}

	ret = memcg_charge_kmem(memcg, gfp, 1 << order);
	if (!ret)
		*_memcg = memcg;

	css_put(&memcg->css);
	return (ret == 0);
}

void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg,
			      int order)
{
	VM_BUG_ON(mem_cgroup_is_root(memcg));

	/* The page allocation failed. Revert */
	if (!page) {
		memcg_uncharge_kmem(memcg, 1 << order);
		return;
	}
	page->mem_cgroup = memcg;
}

void __memcg_kmem_uncharge_pages(struct page *page, int order)
{
	struct mem_cgroup *memcg = page->mem_cgroup;

	if (!memcg)
		return;

	VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);

	memcg_uncharge_kmem(memcg, 1 << order);
	page->mem_cgroup = NULL;
}
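
/*
 * Illustrative sketch (not part of this file): the page allocator
 * pairs the two halves above roughly the way alloc_kmem_pages() does:
 *
 *	struct mem_cgroup *memcg = NULL;
 *
 *	if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
 *		return NULL;
 *	page = alloc_pages(gfp_mask, order);
 *	memcg_kmem_commit_charge(page, memcg, order);
 *	return page;
 */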

struct mem_cgroup *__mem_cgroup_from_kmem(void *ptr)
{
	struct mem_cgroup *memcg = NULL;
	struct kmem_cache *cachep;
	struct page *page;

	page = virt_to_head_page(ptr);
	if (PageSlab(page)) {
		cachep = page->slab_cache;
		if (!is_root_cache(cachep))
			memcg = cachep->memcg_params.memcg;
	} else
		/* page allocated by alloc_kmem_pages */
		memcg = page->mem_cgroup;

	return memcg;
}
#endif /* CONFIG_MEMCG_KMEM */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * Because tail pages are not marked as "used", set that up here.
 * We're under zone->lru_lock, 'splitting on pmd' and compound_lock,
 * so charge/uncharge can never happen in parallel, and move_account()
 * is done under compound_lock(); we don't have to take care of races.
 */
void mem_cgroup_split_huge_fixup(struct page *head)
{
	int i;

	if (mem_cgroup_disabled())
		return;

	for (i = 1; i < HPAGE_PMD_NR; i++)
		head[i].mem_cgroup = head->mem_cgroup;

	__this_cpu_sub(head->mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
		       HPAGE_PMD_NR);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2522

#ifdef CONFIG_MEMCG_SWAP
static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
					 bool charge)
{
	int val = (charge) ? 1 : -1;
	this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
}

/**
 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
 * @entry: swap entry to be moved
 * @from:  mem_cgroup which the entry is moved from
 * @to:  mem_cgroup which the entry is moved to
 *
 * It succeeds only when the swap_cgroup's record for this entry is the same
 * as the mem_cgroup's id of @from.
 *
 * Returns 0 on success, -EINVAL on failure.
 *
 * The caller must have charged to @to, IOW, called page_counter_charge()
 * on both memory and memsw, and called css_get().
 */
static int mem_cgroup_move_swap_account(swp_entry_t entry,
				struct mem_cgroup *from, struct mem_cgroup *to)
{
	unsigned short old_id, new_id;

	old_id = mem_cgroup_id(from);
	new_id = mem_cgroup_id(to);

	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
		mem_cgroup_swap_statistics(from, false);
		mem_cgroup_swap_statistics(to, true);
		return 0;
	}
	return -EINVAL;
}
#else
static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
				struct mem_cgroup *from, struct mem_cgroup *to)
{
	return -EINVAL;
}
#endif

static DEFINE_MUTEX(memcg_limit_mutex);

static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
				   unsigned long limit)
{
	unsigned long curusage;
	unsigned long oldusage;
	bool enlarge = false;
	int retry_count;
	int ret;

	/*
	 * To keep hierarchical reclaim simple, how long we should retry
	 * depends on the caller. We set the retry count to be a function
	 * of the number of children we should visit in this loop.
	 */
	retry_count = MEM_CGROUP_RECLAIM_RETRIES *
		      mem_cgroup_count_children(memcg);

	oldusage = page_counter_read(&memcg->memory);

	do {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		mutex_lock(&memcg_limit_mutex);
		if (limit > memcg->memsw.limit) {
			mutex_unlock(&memcg_limit_mutex);
			ret = -EINVAL;
			break;
		}
		if (limit > memcg->memory.limit)
			enlarge = true;
		ret = page_counter_limit(&memcg->memory, limit);
		mutex_unlock(&memcg_limit_mutex);

		if (!ret)
			break;

		try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true);

		curusage = page_counter_read(&memcg->memory);
		/* Usage is reduced ? */
		if (curusage >= oldusage)
			retry_count--;
		else
			oldusage = curusage;
	} while (retry_count);

	if (!ret && enlarge)
		memcg_oom_recover(memcg);

	return ret;
}

static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
					 unsigned long limit)
{
	unsigned long curusage;
	unsigned long oldusage;
	bool enlarge = false;
	int retry_count;
	int ret;

	/* see mem_cgroup_resize_limit */
	retry_count = MEM_CGROUP_RECLAIM_RETRIES *
		      mem_cgroup_count_children(memcg);

	oldusage = page_counter_read(&memcg->memsw);

	do {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		mutex_lock(&memcg_limit_mutex);
		if (limit < memcg->memory.limit) {
			mutex_unlock(&memcg_limit_mutex);
			ret = -EINVAL;
			break;
		}
		if (limit > memcg->memsw.limit)
			enlarge = true;
		ret = page_counter_limit(&memcg->memsw, limit);
		mutex_unlock(&memcg_limit_mutex);

		if (!ret)
			break;

		try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false);

		curusage = page_counter_read(&memcg->memsw);
		/* Usage is reduced ? */
		if (curusage >= oldusage)
			retry_count--;
		else
			oldusage = curusage;
	} while (retry_count);

	if (!ret && enlarge)
		memcg_oom_recover(memcg);

	return ret;
}

unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	unsigned long nr_reclaimed = 0;
	struct mem_cgroup_per_zone *mz, *next_mz = NULL;
	unsigned long reclaimed;
	int loop = 0;
	struct mem_cgroup_tree_per_zone *mctz;
	unsigned long excess;
	unsigned long nr_scanned;

	if (order > 0)
		return 0;

	mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
	/*
	 * This loop can run for a while, especially if mem_cgroups
	 * continuously keep exceeding their soft limit and putting the
	 * system under pressure.
	 */
	do {
		if (next_mz)
			mz = next_mz;
		else
			mz = mem_cgroup_largest_soft_limit_node(mctz);
		if (!mz)
			break;

		nr_scanned = 0;
		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
						    gfp_mask, &nr_scanned);
		nr_reclaimed += reclaimed;
		*total_scanned += nr_scanned;
		spin_lock_irq(&mctz->lock);
		__mem_cgroup_remove_exceeded(mz, mctz);

		/*
		 * If we failed to reclaim anything from this memory cgroup
		 * it is time to move on to the next cgroup
		 */
		next_mz = NULL;
		if (!reclaimed)
			next_mz = __mem_cgroup_largest_soft_limit_node(mctz);

		excess = soft_limit_excess(mz->memcg);
		/*
		 * One school of thought says that we should not add
		 * back the node to the tree if reclaim returns 0.
		 * But our reclaim could return 0, simply because due
		 * to priority we are exposing a smaller subset of
		 * memory to reclaim from. Consider this as a longer
		 * term TODO.
		 */
		/* If excess == 0, no tree ops */
		__mem_cgroup_insert_exceeded(mz, mctz, excess);
		spin_unlock_irq(&mctz->lock);
		css_put(&mz->memcg->css);
		loop++;
		/*
		 * Could not reclaim anything and there are no more
		 * mem cgroups to try or we seem to be looping without
		 * reclaiming anything.
		 */
		if (!nr_reclaimed &&
			(next_mz == NULL ||
			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
			break;
	} while (!nr_reclaimed);
	if (next_mz)
		css_put(&next_mz->memcg->css);
	return nr_reclaimed;
}

/*
 * Test whether @memcg has children, dead or alive.  Note that this
 * function doesn't care whether @memcg has use_hierarchy enabled and
 * returns %true if there are child csses according to the cgroup
 * hierarchy.  Testing use_hierarchy is the caller's responsibility.
 */
static inline bool memcg_has_children(struct mem_cgroup *memcg)
{
	bool ret;

	/*
	 * The lock does not prevent addition or deletion of children, but
	 * it prevents a new child from being initialized based on this
	 * parent in css_online(), so it's enough to decide whether
	 * hierarchically inherited attributes can still be changed or not.
	 */
	lockdep_assert_held(&memcg_create_mutex);

	rcu_read_lock();
	ret = css_next_child(NULL, &memcg->css);
	rcu_read_unlock();
	return ret;
}

/*
 * Reclaims as many pages from the given memcg as possible and moves
 * the rest to the parent.
 *
 * Caller is responsible for holding css reference for memcg.
 */
static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
{
	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;

	/* we call try-to-free pages to make this cgroup empty */
	lru_add_drain_all();
	/* try to free all pages in this cgroup */
	while (nr_retries && page_counter_read(&memcg->memory)) {
		int progress;

		if (signal_pending(current))
			return -EINTR;

		progress = try_to_free_mem_cgroup_pages(memcg, 1,
							GFP_KERNEL, true);
		if (!progress) {
			nr_retries--;
			/* maybe some writeback is necessary */
			congestion_wait(BLK_RW_ASYNC, HZ/10);
		}

	}

	return 0;
}

static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
					    char *buf, size_t nbytes,
					    loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));

	if (mem_cgroup_is_root(memcg))
		return -EINVAL;
	return mem_cgroup_force_empty(memcg) ?: nbytes;
}

static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
				     struct cftype *cft)
{
	return mem_cgroup_from_css(css)->use_hierarchy;
}

static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
				      struct cftype *cft, u64 val)
{
	int retval = 0;
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);

	mutex_lock(&memcg_create_mutex);

	if (memcg->use_hierarchy == val)
		goto out;

	/*
	 * If parent's use_hierarchy is set, we can't make any modifications
	 * in the child subtrees. If it is unset, then the change can
	 * occur, provided the current cgroup has no children.
	 *
	 * For the root cgroup, parent_mem is NULL, we allow value to be
	 * set if there are no children.
	 */
	if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
				(val == 1 || val == 0)) {
		if (!memcg_has_children(memcg))
			memcg->use_hierarchy = val;
		else
			retval = -EBUSY;
	} else
		retval = -EINVAL;

out:
	mutex_unlock(&memcg_create_mutex);

	return retval;
}

static unsigned long tree_stat(struct mem_cgroup *memcg,
			       enum mem_cgroup_stat_index idx)
{
	struct mem_cgroup *iter;
	unsigned long val = 0;

	for_each_mem_cgroup_tree(iter, memcg)
		val += mem_cgroup_read_stat(iter, idx);

	return val;
}

static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
{
	u64 val;

	if (mem_cgroup_is_root(memcg)) {
		val = tree_stat(memcg, MEM_CGROUP_STAT_CACHE);
		val += tree_stat(memcg, MEM_CGROUP_STAT_RSS);
		if (swap)
			val += tree_stat(memcg, MEM_CGROUP_STAT_SWAP);
	} else {
		if (!swap)
			val = page_counter_read(&memcg->memory);
		else
			val = page_counter_read(&memcg->memsw);
	}
	return val << PAGE_SHIFT;
}

enum {
	RES_USAGE,
	RES_LIMIT,
	RES_MAX_USAGE,
	RES_FAILCNT,
	RES_SOFT_LIMIT,
};

static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct page_counter *counter;

	switch (MEMFILE_TYPE(cft->private)) {
	case _MEM:
		counter = &memcg->memory;
		break;
	case _MEMSWAP:
		counter = &memcg->memsw;
		break;
	case _KMEM:
		counter = &memcg->kmem;
		break;
	default:
		BUG();
	}

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_USAGE:
		if (counter == &memcg->memory)
			return mem_cgroup_usage(memcg, false);
		if (counter == &memcg->memsw)
			return mem_cgroup_usage(memcg, true);
		return (u64)page_counter_read(counter) * PAGE_SIZE;
	case RES_LIMIT:
		return (u64)counter->limit * PAGE_SIZE;
	case RES_MAX_USAGE:
		return (u64)counter->watermark * PAGE_SIZE;
	case RES_FAILCNT:
		return counter->failcnt;
	case RES_SOFT_LIMIT:
		return (u64)memcg->soft_limit * PAGE_SIZE;
	default:
		BUG();
	}
}

#ifdef CONFIG_MEMCG_KMEM
static int memcg_activate_kmem(struct mem_cgroup *memcg,
			       unsigned long nr_pages)
{
	int err = 0;
	int memcg_id;

	BUG_ON(memcg->kmemcg_id >= 0);
	BUG_ON(memcg->kmem_acct_activated);
	BUG_ON(memcg->kmem_acct_active);

	/*
	 * For simplicity, we won't allow this to be disabled.  It also can't
	 * be changed if the cgroup has children already, or if tasks had
	 * already joined.
	 *
	 * If tasks join before we set the limit, a person looking at
	 * kmem.usage_in_bytes will have no way to determine when it took
	 * place, which makes the value quite meaningless.
	 *
	 * After it first became limited, changes in the value of the limit are
	 * of course permitted.
	 */
	mutex_lock(&memcg_create_mutex);
	if (cgroup_has_tasks(memcg->css.cgroup) ||
	    (memcg->use_hierarchy && memcg_has_children(memcg)))
		err = -EBUSY;
	mutex_unlock(&memcg_create_mutex);
	if (err)
		goto out;

	memcg_id = memcg_alloc_cache_id();
	if (memcg_id < 0) {
		err = memcg_id;
		goto out;
	}

	/*
	 * We couldn't have accounted to this cgroup, because it hasn't got
	 * activated yet, so this should succeed.
	 */
	err = page_counter_limit(&memcg->kmem, nr_pages);
	VM_BUG_ON(err);

	static_key_slow_inc(&memcg_kmem_enabled_key);
	/*
	 * A memory cgroup is considered kmem-active as soon as it gets
	 * kmemcg_id. Setting the id after enabling static branching will
	 * guarantee no one starts accounting before all call sites are
	 * patched.
	 */
	memcg->kmemcg_id = memcg_id;
	memcg->kmem_acct_activated = true;
	memcg->kmem_acct_active = true;
out:
	return err;
}

static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
				   unsigned long limit)
{
	int ret;

	mutex_lock(&memcg_limit_mutex);
	if (!memcg_kmem_is_active(memcg))
		ret = memcg_activate_kmem(memcg, limit);
	else
		ret = page_counter_limit(&memcg->kmem, limit);
	mutex_unlock(&memcg_limit_mutex);
	return ret;
}

static int memcg_propagate_kmem(struct mem_cgroup *memcg)
{
	int ret = 0;
	struct mem_cgroup *parent = parent_mem_cgroup(memcg);

	if (!parent)
		return 0;

	mutex_lock(&memcg_limit_mutex);
	/*
	 * If the parent cgroup is not kmem-active now, it cannot be activated
	 * after this point, because it has at least one child already.
	 */
	if (memcg_kmem_is_active(parent))
		ret = memcg_activate_kmem(memcg, PAGE_COUNTER_MAX);
	mutex_unlock(&memcg_limit_mutex);
	return ret;
}
#else
static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
				   unsigned long limit)
{
	return -EINVAL;
}
#endif /* CONFIG_MEMCG_KMEM */

/*
 * The user of this function is...
 * RES_LIMIT.
 */
static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long nr_pages;
	int ret;

	buf = strstrip(buf);
	ret = page_counter_memparse(buf, "-1", &nr_pages);
	if (ret)
		return ret;

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_LIMIT:
		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
			ret = -EINVAL;
			break;
		}
		switch (MEMFILE_TYPE(of_cft(of)->private)) {
		case _MEM:
			ret = mem_cgroup_resize_limit(memcg, nr_pages);
			break;
		case _MEMSWAP:
			ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages);
			break;
		case _KMEM:
			ret = memcg_update_kmem_limit(memcg, nr_pages);
			break;
		}
		break;
	case RES_SOFT_LIMIT:
		memcg->soft_limit = nr_pages;
		ret = 0;
		break;
	}
	return ret ?: nbytes;
}

static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
				size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	struct page_counter *counter;

	switch (MEMFILE_TYPE(of_cft(of)->private)) {
	case _MEM:
		counter = &memcg->memory;
		break;
	case _MEMSWAP:
		counter = &memcg->memsw;
		break;
	case _KMEM:
		counter = &memcg->kmem;
		break;
	default:
		BUG();
	}

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_MAX_USAGE:
		page_counter_reset_watermark(counter);
		break;
	case RES_FAILCNT:
		counter->failcnt = 0;
		break;
	default:
		BUG();
	}

	return nbytes;
}

static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
					struct cftype *cft)
{
	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
}

#ifdef CONFIG_MMU
static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
					struct cftype *cft, u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	if (val & ~MOVE_MASK)
		return -EINVAL;

	/*
	 * No kind of locking is needed in here, because ->can_attach() will
	 * check this value once in the beginning of the process, and then carry
	 * on with stale data. This means that changes to this value will only
	 * affect task migrations starting after the change.
	 */
	memcg->move_charge_at_immigrate = val;
	return 0;
}
#else
static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
					struct cftype *cft, u64 val)
{
	return -ENOSYS;
}
#endif

#ifdef CONFIG_NUMA
static int memcg_numa_stat_show(struct seq_file *m, void *v)
{
	struct numa_stat {
		const char *name;
		unsigned int lru_mask;
	};

	static const struct numa_stat stats[] = {
		{ "total", LRU_ALL },
		{ "file", LRU_ALL_FILE },
		{ "anon", LRU_ALL_ANON },
		{ "unevictable", BIT(LRU_UNEVICTABLE) },
	};
	const struct numa_stat *stat;
	int nid;
	unsigned long nr;
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));

	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
		nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
		seq_printf(m, "%s=%lu", stat->name, nr);
		for_each_node_state(nid, N_MEMORY) {
			nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
							  stat->lru_mask);
			seq_printf(m, " N%d=%lu", nid, nr);
		}
		seq_putc(m, '\n');
	}

	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
		struct mem_cgroup *iter;

		nr = 0;
		for_each_mem_cgroup_tree(iter, memcg)
			nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
		seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
		for_each_node_state(nid, N_MEMORY) {
			nr = 0;
			for_each_mem_cgroup_tree(iter, memcg)
				nr += mem_cgroup_node_nr_lru_pages(
					iter, nid, stat->lru_mask);
			seq_printf(m, " N%d=%lu", nid, nr);
		}
		seq_putc(m, '\n');
	}

	return 0;
}
#endif /* CONFIG_NUMA */
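
/*
 * Example output (illustrative numbers) as read from memory.numa_stat
 * on a single-node machine:
 *
 *	total=365 N0=365
 *	file=98 N0=98
 *	anon=267 N0=267
 *	unevictable=0 N0=0
 *	hierarchical_total=365 N0=365
 *	...
 */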

static int memcg_stat_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
	unsigned long memory, memsw;
	struct mem_cgroup *mi;
	unsigned int i;

	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_stat_names) !=
		     MEM_CGROUP_STAT_NSTATS);
	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_events_names) !=
		     MEM_CGROUP_EVENTS_NSTATS);
	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);

	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
		if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
			continue;
		seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i],
			   mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
	}

	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
		seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
			   mem_cgroup_read_events(memcg, i));

	for (i = 0; i < NR_LRU_LISTS; i++)
		seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
			   mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);

	/* Hierarchical information */
	memory = memsw = PAGE_COUNTER_MAX;
	for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
		memory = min(memory, mi->memory.limit);
		memsw = min(memsw, mi->memsw.limit);
	}
	seq_printf(m, "hierarchical_memory_limit %llu\n",
		   (u64)memory * PAGE_SIZE);
	if (do_swap_account)
		seq_printf(m, "hierarchical_memsw_limit %llu\n",
			   (u64)memsw * PAGE_SIZE);

	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
		unsigned long long val = 0;

		if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
			continue;
		for_each_mem_cgroup_tree(mi, memcg)
			val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
		seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val);
	}

	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
		unsigned long long val = 0;

		for_each_mem_cgroup_tree(mi, memcg)
			val += mem_cgroup_read_events(mi, i);
		seq_printf(m, "total_%s %llu\n",
			   mem_cgroup_events_names[i], val);
	}

	for (i = 0; i < NR_LRU_LISTS; i++) {
		unsigned long long val = 0;

		for_each_mem_cgroup_tree(mi, memcg)
			val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
		seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
	}

#ifdef CONFIG_DEBUG_VM
	{
		int nid, zid;
		struct mem_cgroup_per_zone *mz;
		struct zone_reclaim_stat *rstat;
		unsigned long recent_rotated[2] = {0, 0};
		unsigned long recent_scanned[2] = {0, 0};

		for_each_online_node(nid)
			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
				mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
				rstat = &mz->lruvec.reclaim_stat;

				recent_rotated[0] += rstat->recent_rotated[0];
				recent_rotated[1] += rstat->recent_rotated[1];
				recent_scanned[0] += rstat->recent_scanned[0];
				recent_scanned[1] += rstat->recent_scanned[1];
			}
		seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
		seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
		seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
		seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
	}
#endif

	return 0;
}

static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	return mem_cgroup_swappiness(memcg);
}

static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	if (val > 100)
		return -EINVAL;

	if (css->parent)
		memcg->swappiness = val;
	else
		vm_swappiness = val;

	return 0;
}

static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
{
	struct mem_cgroup_threshold_ary *t;
	unsigned long usage;
	int i;

	rcu_read_lock();
	if (!swap)
		t = rcu_dereference(memcg->thresholds.primary);
	else
		t = rcu_dereference(memcg->memsw_thresholds.primary);

	if (!t)
		goto unlock;

	usage = mem_cgroup_usage(memcg, swap);

	/*
	 * current_threshold points to threshold just below or equal to usage.
	 * If it's not true, a threshold was crossed after last
	 * call of __mem_cgroup_threshold().
	 */
	i = t->current_threshold;

	/*
	 * Iterate backward over array of thresholds starting from
	 * current_threshold and check if a threshold is crossed.
	 * If none of thresholds below usage is crossed, we read
	 * only one element of the array here.
	 */
	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
		eventfd_signal(t->entries[i].eventfd, 1);

	/* i = current_threshold + 1 */
	i++;

	/*
	 * Iterate forward over array of thresholds starting from
	 * current_threshold+1 and check if a threshold is crossed.
	 * If none of thresholds above usage is crossed, we read
	 * only one element of the array here.
	 */
	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
		eventfd_signal(t->entries[i].eventfd, 1);

	/* Update current_threshold */
	t->current_threshold = i - 1;
unlock:
	rcu_read_unlock();
}

static void mem_cgroup_threshold(struct mem_cgroup *memcg)
{
	while (memcg) {
		__mem_cgroup_threshold(memcg, false);
		if (do_swap_account)
			__mem_cgroup_threshold(memcg, true);

		memcg = parent_mem_cgroup(memcg);
	}
}
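
/*
 * Worked example (illustrative): with thresholds {4M, 8M, 16M} and
 * usage at 10M, current_threshold indexes the 8M entry.  If usage
 * drops to 3M, the backward loop in __mem_cgroup_threshold() signals
 * the 8M and 4M eventfds and current_threshold ends up at -1; if
 * usage instead grows to 20M, the forward loop signals the 16M
 * eventfd and current_threshold moves to the 16M entry.
 */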

static int compare_thresholds(const void *a, const void *b)
{
	const struct mem_cgroup_threshold *_a = a;
	const struct mem_cgroup_threshold *_b = b;

	if (_a->threshold > _b->threshold)
		return 1;

	if (_a->threshold < _b->threshold)
		return -1;

	return 0;
}

static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
{
	struct mem_cgroup_eventfd_list *ev;

	spin_lock(&memcg_oom_lock);

	list_for_each_entry(ev, &memcg->oom_notify, list)
		eventfd_signal(ev->eventfd, 1);

	spin_unlock(&memcg_oom_lock);
	return 0;
}

static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg)
		mem_cgroup_oom_notify_cb(iter);
}

static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, const char *args, enum res_type type)
{
	struct mem_cgroup_thresholds *thresholds;
	struct mem_cgroup_threshold_ary *new;
	unsigned long threshold;
	unsigned long usage;
	int i, size, ret;

	ret = page_counter_memparse(args, "-1", &threshold);
	if (ret)
		return ret;
	threshold <<= PAGE_SHIFT;

	mutex_lock(&memcg->thresholds_lock);

	if (type == _MEM) {
		thresholds = &memcg->thresholds;
		usage = mem_cgroup_usage(memcg, false);
	} else if (type == _MEMSWAP) {
		thresholds = &memcg->memsw_thresholds;
		usage = mem_cgroup_usage(memcg, true);
	} else
		BUG();

	/* Check if a threshold crossed before adding a new one */
	if (thresholds->primary)
		__mem_cgroup_threshold(memcg, type == _MEMSWAP);

	size = thresholds->primary ? thresholds->primary->size + 1 : 1;

	/* Allocate memory for new array of thresholds */
	new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
			GFP_KERNEL);
	if (!new) {
		ret = -ENOMEM;
		goto unlock;
	}
	new->size = size;

	/* Copy thresholds (if any) to new array */
	if (thresholds->primary) {
		memcpy(new->entries, thresholds->primary->entries, (size - 1) *
				sizeof(struct mem_cgroup_threshold));
	}

	/* Add new threshold */
	new->entries[size - 1].eventfd = eventfd;
	new->entries[size - 1].threshold = threshold;

	/* Sort thresholds. Registering of new threshold isn't time-critical */
	sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
			compare_thresholds, NULL);

	/* Find current threshold */
	new->current_threshold = -1;
	for (i = 0; i < size; i++) {
		if (new->entries[i].threshold <= usage) {
			/*
			 * new->current_threshold will not be used until
			 * rcu_assign_pointer(), so it's safe to increment
			 * it here.
			 */
			++new->current_threshold;
		} else
			break;
	}

	/* Free old spare buffer and save old primary buffer as spare */
	kfree(thresholds->spare);
	thresholds->spare = thresholds->primary;

	rcu_assign_pointer(thresholds->primary, new);

	/* To be sure that nobody uses thresholds */
	synchronize_rcu();

unlock:
	mutex_unlock(&memcg->thresholds_lock);

	return ret;
}

static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, const char *args)
{
	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
}

static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, const char *args)
{
	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
}
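
/*
 * Usage example (illustrative, describing the cgroup-v1 interface
 * rather than code in this file): userspace arms a threshold by
 * writing
 *
 *	<event_fd> <fd of memory.usage_in_bytes> <threshold in bytes>
 *
 * to the group's cgroup.event_control file; the eventfd registered
 * through the handlers above is then signalled whenever usage crosses
 * the threshold in either direction.
 */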

static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, enum res_type type)
{
	struct mem_cgroup_thresholds *thresholds;
	struct mem_cgroup_threshold_ary *new;
	unsigned long usage;
	int i, j, size;

	mutex_lock(&memcg->thresholds_lock);

	if (type == _MEM) {
		thresholds = &memcg->thresholds;
		usage = mem_cgroup_usage(memcg, false);
	} else if (type == _MEMSWAP) {
		thresholds = &memcg->memsw_thresholds;
		usage = mem_cgroup_usage(memcg, true);
	} else
		BUG();

	if (!thresholds->primary)
		goto unlock;

	/* Check if a threshold crossed before removing */
	__mem_cgroup_threshold(memcg, type == _MEMSWAP);

	/* Calculate new number of threshold */
	size = 0;
	for (i = 0; i < thresholds->primary->size; i++) {
		if (thresholds->primary->entries[i].eventfd != eventfd)
			size++;
	}

	new = thresholds->spare;

	/* Set thresholds array to NULL if we don't have thresholds */
	if (!size) {
		kfree(new);
		new = NULL;
		goto swap_buffers;
	}

	new->size = size;

	/* Copy thresholds and find current threshold */
	new->current_threshold = -1;
	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
		if (thresholds->primary->entries[i].eventfd == eventfd)
			continue;

		new->entries[j] = thresholds->primary->entries[i];
		if (new->entries[j].threshold <= usage) {
			/*
			 * new->current_threshold will not be used
			 * until rcu_assign_pointer(), so it's safe to increment
			 * it here.
			 */
			++new->current_threshold;
		}
		j++;
	}

swap_buffers:
	/* Swap primary and spare array */
	thresholds->spare = thresholds->primary;
	/* If all events are unregistered, free the spare array */
	if (!new) {
		kfree(thresholds->spare);
		thresholds->spare = NULL;
	}

	rcu_assign_pointer(thresholds->primary, new);

	/* To be sure that nobody uses thresholds */
	synchronize_rcu();
unlock:
	mutex_unlock(&memcg->thresholds_lock);
}

static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd)
{
	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
}

static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd)
{
	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
}

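/*
 * OOM notification: link @eventfd into memcg->oom_notify under
 * memcg_oom_lock, signalling it immediately if the group is already
 * under OOM.
 */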
static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, const char *args)
{
	struct mem_cgroup_eventfd_list *event;

	event = kmalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	spin_lock(&memcg_oom_lock);

	event->eventfd = eventfd;
	list_add(&event->list, &memcg->oom_notify);

	/* already in OOM ? */
	if (memcg->under_oom)
		eventfd_signal(eventfd, 1);
	spin_unlock(&memcg_oom_lock);

	return 0;
}

static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd)
{
	struct mem_cgroup_eventfd_list *ev, *tmp;

	spin_lock(&memcg_oom_lock);

	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
		if (ev->eventfd == eventfd) {
			list_del(&ev->list);
			kfree(ev);
		}
	}

	spin_unlock(&memcg_oom_lock);
}

static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));

	seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
	seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
	return 0;
}

static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
	struct cftype *cft, u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	/* cannot set to root cgroup and only 0 and 1 are allowed */
	if (!css->parent || !((val == 0) || (val == 1)))
		return -EINVAL;

	memcg->oom_kill_disable = val;
	if (!val)
		memcg_oom_recover(memcg);

	return 0;
}

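/*
 * Kernel memory accounting setup and teardown.  With CONFIG_MEMCG_KMEM
 * disabled, the stubs further down reduce these hooks to no-ops.
 */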
#ifdef CONFIG_MEMCG_KMEM
static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
	int ret;

	ret = memcg_propagate_kmem(memcg);
	if (ret)
		return ret;

	return mem_cgroup_sockets_init(memcg, ss);
}

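/*
 * Called on css offlining: stop kmem accounting for this memcg and
 * reparent its kmemcg_id and list_lru entries so nothing is left
 * dangling once the cgroup is gone.
 */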
static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *parent, *child;
	int kmemcg_id;

	if (!memcg->kmem_acct_active)
		return;

	/*
	 * Clear the 'active' flag before clearing memcg_caches arrays entries.
	 * Since we take the slab_mutex in memcg_deactivate_kmem_caches(), it
	 * guarantees no cache will be created for this cgroup after we are
	 * done (see memcg_create_kmem_cache()).
	 */
	memcg->kmem_acct_active = false;

	memcg_deactivate_kmem_caches(memcg);

	kmemcg_id = memcg->kmemcg_id;
	BUG_ON(kmemcg_id < 0);

	parent = parent_mem_cgroup(memcg);
	if (!parent)
		parent = root_mem_cgroup;

	/*
	 * Change kmemcg_id of this cgroup and all its descendants to the
	 * parent's id, and then move all entries from this cgroup's list_lrus
	 * to ones of the parent. After we have finished, all list_lrus
	 * corresponding to this cgroup are guaranteed to remain empty. The
	 * ordering is imposed by list_lru_node->lock taken by
	 * memcg_drain_all_list_lrus().
	 */
	css_for_each_descendant_pre(css, &memcg->css) {
		child = mem_cgroup_from_css(css);
		BUG_ON(child->kmemcg_id != kmemcg_id);
		child->kmemcg_id = parent->kmemcg_id;
		if (!memcg->use_hierarchy)
			break;
	}
	memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);

	memcg_free_cache_id(kmemcg_id);
}

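/*
 * Final kmem teardown on css free: release the per-memcg kmem caches,
 * drop the static key, and tear down socket memory accounting.
 */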
static void memcg_destroy_kmem(struct mem_cgroup *memcg)
{
	if (memcg->kmem_acct_activated) {
		memcg_destroy_kmem_caches(memcg);
		static_key_slow_dec(&memcg_kmem_enabled_key);
		WARN_ON(page_counter_read(&memcg->kmem));
	}
	mem_cgroup_sockets_destroy(memcg);
}
#else
static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
	return 0;
}

static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
{
}

static void memcg_destroy_kmem(struct mem_cgroup *memcg)
{
}
#endif

#ifdef CONFIG_CGROUP_WRITEBACK

struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg)
{
	return &memcg->cgwb_list;
}

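/*
 * Lifetime hooks for the memcg's writeback domain, consumed by the
 * dirty throttling code when cgroup writeback is in use.
 */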
static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
{
	return wb_domain_init(&memcg->cgwb_domain, gfp);
}

static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
{
	wb_domain_exit(&memcg->cgwb_domain);
}

static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
{
	wb_domain_size_changed(&memcg->cgwb_domain);
}

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);

	if (!memcg->css.parent)
		return NULL;

	return &memcg->cgwb_domain;
}

/**
 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
 * @wb: bdi_writeback in question
 * @pfilepages: out parameter for number of file pages
 * @pheadroom: out parameter for number of allocatable pages according to memcg
 * @pdirty: out parameter for number of dirty pages
 * @pwriteback: out parameter for number of pages under writeback
 *
 * Determine the numbers of file, headroom, dirty, and writeback pages in
 * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
 * is a bit more involved.
 *
 * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
 * headroom is calculated as the lowest headroom of itself and the
 * ancestors.  Note that this doesn't consider the actual amount of
 * available memory in the system.  The caller should further cap
 * *@pheadroom accordingly.
 */
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
	struct mem_cgroup *parent;

	*pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY);

	/* this should eventually include NR_UNSTABLE_NFS */
	*pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
	*pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
						     (1 << LRU_ACTIVE_FILE));
	*pheadroom = PAGE_COUNTER_MAX;

	while ((parent = parent_mem_cgroup(memcg))) {
		unsigned long ceiling = min(memcg->memory.limit, memcg->high);
		unsigned long used = page_counter_read(&memcg->memory);

		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
		memcg = parent;
	}
}

#else	/* CONFIG_CGROUP_WRITEBACK */

static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
{
	return 0;
}

static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
{
}

static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

/*
 * DO NOT USE IN NEW FILES.
 *
 * "cgroup.event_control" implementation.
 *
 * This is way over-engineered.  It tries to support fully configurable
 * events for each user.  Such level of flexibility is completely
 * unnecessary especially in the light of the planned unified hierarchy.
 *
 * Please deprecate this and replace with something simpler if at all
 * possible.
 */

/*
 * Unregister event and free resources.
 *
 * Gets called from workqueue.
 */
static void memcg_event_remove(struct work_struct *work)
{
	struct mem_cgroup_event *event =
		container_of(work, struct mem_cgroup_event, remove);
	struct mem_cgroup *memcg = event->memcg;

	remove_wait_queue(event->wqh, &event->wait);

	event->unregister_event(memcg, event->eventfd);

	/* Notify userspace the event is going away. */
	eventfd_signal(event->eventfd, 1);

	eventfd_ctx_put(event->eventfd);
	kfree(event);
	css_put(&memcg->css);
}

/*
 * Gets called on POLLHUP on eventfd when user closes it.
 *
 * Called with wqh->lock held and interrupts disabled.
 */
static int memcg_event_wake(wait_queue_t *wait, unsigned mode,
			    int sync, void *key)
{
	struct mem_cgroup_event *event =
		container_of(wait, struct mem_cgroup_event, wait);
	struct mem_cgroup *memcg = event->memcg;
	unsigned long flags = (unsigned long)key;

	if (flags & POLLHUP) {
		/*
		 * If the event has been detached at cgroup removal, we
		 * can simply return knowing the other side will cleanup
		 * for us.
		 *
		 * We can't race against event freeing since the other
		 * side will require wqh->lock via remove_wait_queue(),
		 * which we hold.
		 */
		spin_lock(&memcg->event_list_lock);
		if (!list_empty(&event->list)) {
			list_del_init(&event->list);
			/*
			 * We are in atomic context, but memcg_event_remove()
			 * may sleep, so we have to call it in workqueue.
			 */
			schedule_work(&event->remove);
		}
		spin_unlock(&memcg->event_list_lock);
	}

	return 0;
}

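/*
 * poll_table callback: record the waitqueue behind the eventfd and add
 * our wait entry to it so that memcg_event_wake() sees the POLLHUP.
 */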
static void memcg_event_ptable_queue_proc(struct file *file,
		wait_queue_head_t *wqh, poll_table *pt)
{
	struct mem_cgroup_event *event =
		container_of(pt, struct mem_cgroup_event, pt);

	event->wqh = wqh;
	add_wait_queue(wqh, &event->wait);
}

/*
 * DO NOT USE IN NEW FILES.
 *
 * Parse input and register new cgroup event handler.
 *
 * Input must be in format '<event_fd> <control_fd> <args>'.
 * Interpretation of args is defined by control file implementation.
 */
static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
					 char *buf, size_t nbytes, loff_t off)
{
	struct cgroup_subsys_state *css = of_css(of);
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct mem_cgroup_event *event;
	struct cgroup_subsys_state *cfile_css;
	unsigned int efd, cfd;
	struct fd efile;
	struct fd cfile;
	const char *name;
	char *endp;
	int ret;

	buf = strstrip(buf);

	efd = simple_strtoul(buf, &endp, 10);
	if (*endp != ' ')
		return -EINVAL;
	buf = endp + 1;

	cfd = simple_strtoul(buf, &endp, 10);
	if ((*endp != ' ') && (*endp != '\0'))
		return -EINVAL;
	buf = endp + 1;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	event->memcg = memcg;
	INIT_LIST_HEAD(&event->list);
	init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
	init_waitqueue_func_entry(&event->wait, memcg_event_wake);
	INIT_WORK(&event->remove, memcg_event_remove);

	efile = fdget(efd);
	if (!efile.file) {
		ret = -EBADF;
		goto out_kfree;
	}

	event->eventfd = eventfd_ctx_fileget(efile.file);
	if (IS_ERR(event->eventfd)) {
		ret = PTR_ERR(event->eventfd);
		goto out_put_efile;
	}

	cfile = fdget(cfd);
	if (!cfile.file) {
		ret = -EBADF;
		goto out_put_eventfd;
	}

	/* the process needs read permission on the control file */
	/* AV: shouldn't we check that it's been opened for read instead? */
	ret = inode_permission(file_inode(cfile.file), MAY_READ);
	if (ret < 0)
		goto out_put_cfile;

	/*
	 * Determine the event callbacks and set them in @event.  This used
	 * to be done via struct cftype but cgroup core no longer knows
	 * about these events.  The following is crude but the whole thing
	 * is for compatibility anyway.
	 *
	 * DO NOT ADD NEW FILES.
	 */
	name = cfile.file->f_path.dentry->d_name.name;

	if (!strcmp(name, "memory.usage_in_bytes")) {
		event->register_event = mem_cgroup_usage_register_event;
		event->unregister_event = mem_cgroup_usage_unregister_event;
	} else if (!strcmp(name, "memory.oom_control")) {
		event->register_event = mem_cgroup_oom_register_event;
		event->unregister_event = mem_cgroup_oom_unregister_event;
	} else if (!strcmp(name, "memory.pressure_level")) {
		event->register_event = vmpressure_register_event;
		event->unregister_event = vmpressure_unregister_event;
	} else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
		event->register_event = memsw_cgroup_usage_register_event;
		event->unregister_event = memsw_cgroup_usage_unregister_event;
	} else {
		ret = -EINVAL;
		goto out_put_cfile;
	}

	/*
	 * Verify @cfile should belong to @css.  Also, remaining events are
	 * automatically removed on cgroup destruction but the removal is
	 * asynchronous, so take an extra ref on @css.
	 */
	cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
					       &memory_cgrp_subsys);
	ret = -EINVAL;
	if (IS_ERR(cfile_css))
		goto out_put_cfile;
	if (cfile_css != css) {
		css_put(cfile_css);
		goto out_put_cfile;
	}

	ret = event->register_event(memcg, event->eventfd, buf);
	if (ret)
		goto out_put_css;

	efile.file->f_op->poll(efile.file, &event->pt);

	spin_lock(&memcg->event_list_lock);
	list_add(&event->list, &memcg->event_list);
	spin_unlock(&memcg->event_list_lock);

	fdput(cfile);
	fdput(efile);

	return nbytes;

out_put_css:
	css_put(css);
out_put_cfile:
	fdput(cfile);
out_put_eventfd:
	eventfd_ctx_put(event->eventfd);
out_put_efile:
	fdput(efile);
out_kfree:
	kfree(event);

	return ret;
}

static struct cftype mem_cgroup_legacy_files[] = {
	{
		.name = "usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "soft_limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "failcnt",
		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "stat",
		.seq_show = memcg_stat_show,
	},
	{
		.name = "force_empty",
		.write = mem_cgroup_force_empty_write,
	},
	{
		.name = "use_hierarchy",
		.write_u64 = mem_cgroup_hierarchy_write,
		.read_u64 = mem_cgroup_hierarchy_read,
	},
	{
		.name = "cgroup.event_control",		/* XXX: for compat */
		.write = memcg_write_event_control,
		.flags = CFTYPE_NO_PREFIX,
		.mode = S_IWUGO,
	},
	{
		.name = "swappiness",
		.read_u64 = mem_cgroup_swappiness_read,
		.write_u64 = mem_cgroup_swappiness_write,
	},
	{
		.name = "move_charge_at_immigrate",
		.read_u64 = mem_cgroup_move_charge_read,
		.write_u64 = mem_cgroup_move_charge_write,
	},
	{
		.name = "oom_control",
		.seq_show = mem_cgroup_oom_control_read,
		.write_u64 = mem_cgroup_oom_control_write,
		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
	},
	{
		.name = "pressure_level",
	},
#ifdef CONFIG_NUMA
	{
		.name = "numa_stat",
		.seq_show = memcg_numa_stat_show,
	},
#endif
#ifdef CONFIG_MEMCG_KMEM
	{
		.name = "kmem.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "kmem.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "kmem.failcnt",
		.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "kmem.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
#ifdef CONFIG_SLABINFO
	{
		.name = "kmem.slabinfo",
		.seq_start = slab_start,
		.seq_next = slab_next,
		.seq_stop = slab_stop,
		.seq_show = memcg_slab_show,
	},
#endif
#endif
	{ },	/* terminate */
};

static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
{
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup_per_zone *mz;
	int zone, tmp = node;
	/*
	 * This routine is called against possible nodes.
	 * But it's a BUG to call kmalloc() against an offline node.
	 *
	 * TODO: this routine can waste much memory for nodes which will
	 *       never be onlined. It's better to use a memory hotplug
	 *       callback function.
	 */
	if (!node_state(node, N_NORMAL_MEMORY))
		tmp = -1;
	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
	if (!pn)
		return 1;

	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
		mz = &pn->zoneinfo[zone];
		lruvec_init(&mz->lruvec);
		mz->usage_in_excess = 0;
		mz->on_tree = false;
		mz->memcg = memcg;
	}
	memcg->nodeinfo[node] = pn;
	return 0;
}

static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
{
	kfree(memcg->nodeinfo[node]);
}

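/*
 * Allocate the bare memcg: the structure itself (with its trailing
 * per-node pointer array), the per-cpu statistics, and the writeback
 * domain.  Per-zone info is attached separately during css allocation.
 */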
static struct mem_cgroup *mem_cgroup_alloc(void)
{
	struct mem_cgroup *memcg;
	size_t size;

	size = sizeof(struct mem_cgroup);
	size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);

	memcg = kzalloc(size, GFP_KERNEL);
	if (!memcg)
		return NULL;

	memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
	if (!memcg->stat)
		goto out_free;

	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
		goto out_free_stat;

	return memcg;

out_free_stat:
	free_percpu(memcg->stat);
out_free:
	kfree(memcg);
	return NULL;
}

/*
 * At destroying mem_cgroup, references from swap_cgroup can remain.
 * (scanning all at force_empty is too costly...)
 *
 * Instead of clearing all references at force_empty, we remember
 * the number of references from swap_cgroup and free mem_cgroup when
 * it goes down to 0.
 *
 * Removal of cgroup itself succeeds regardless of refs from swap.
 */

static void __mem_cgroup_free(struct mem_cgroup *memcg)
{
	int node;

	mem_cgroup_remove_from_trees(memcg);

	for_each_node(node)
		free_mem_cgroup_per_zone_info(memcg, node);

	free_percpu(memcg->stat);
	memcg_wb_domain_exit(memcg);
	kfree(memcg);
}

/*
 * Returns the parent mem_cgroup of @memcg in a hierarchy with
 * use_hierarchy enabled.
 */
struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg->memory.parent)
		return NULL;
	return mem_cgroup_from_counter(memcg->memory.parent, memory);
}
EXPORT_SYMBOL(parent_mem_cgroup);

static struct cgroup_subsys_state * __ref
mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct mem_cgroup *memcg;
	long error = -ENOMEM;
	int node;

	memcg = mem_cgroup_alloc();
	if (!memcg)
		return ERR_PTR(error);

	for_each_node(node)
		if (alloc_mem_cgroup_per_zone_info(memcg, node))
			goto free_out;

	/* root ? */
	if (parent_css == NULL) {
		root_mem_cgroup = memcg;
		mem_cgroup_root_css = &memcg->css;
		page_counter_init(&memcg->memory, NULL);
		memcg->high = PAGE_COUNTER_MAX;
		memcg->soft_limit = PAGE_COUNTER_MAX;
		page_counter_init(&memcg->memsw, NULL);
		page_counter_init(&memcg->kmem, NULL);
	}

	memcg->last_scanned_node = MAX_NUMNODES;
	INIT_LIST_HEAD(&memcg->oom_notify);
	memcg->move_charge_at_immigrate = 0;
	mutex_init(&memcg->thresholds_lock);
	spin_lock_init(&memcg->move_lock);
	vmpressure_init(&memcg->vmpressure);
	INIT_LIST_HEAD(&memcg->event_list);
	spin_lock_init(&memcg->event_list_lock);
#ifdef CONFIG_MEMCG_KMEM
	memcg->kmemcg_id = -1;
#endif
#ifdef CONFIG_CGROUP_WRITEBACK
	INIT_LIST_HEAD(&memcg->cgwb_list);
#endif
	return &memcg->css;

free_out:
	__mem_cgroup_free(memcg);
	return ERR_PTR(error);
}

static int
mem_cgroup_css_online(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct mem_cgroup *parent = mem_cgroup_from_css(css->parent);
	int ret;

	if (css->id > MEM_CGROUP_ID_MAX)
		return -ENOSPC;

	if (!parent)
		return 0;

	mutex_lock(&memcg_create_mutex);

	memcg->use_hierarchy = parent->use_hierarchy;
	memcg->oom_kill_disable = parent->oom_kill_disable;
	memcg->swappiness = mem_cgroup_swappiness(parent);

	if (parent->use_hierarchy) {
		page_counter_init(&memcg->memory, &parent->memory);
		memcg->high = PAGE_COUNTER_MAX;
		memcg->soft_limit = PAGE_COUNTER_MAX;
		page_counter_init(&memcg->memsw, &parent->memsw);
		page_counter_init(&memcg->kmem, &parent->kmem);

		/*
		 * No need to take a reference to the parent because cgroup
		 * core guarantees its existence.
		 */
	} else {
		page_counter_init(&memcg->memory, NULL);
		memcg->high = PAGE_COUNTER_MAX;
		memcg->soft_limit = PAGE_COUNTER_MAX;
		page_counter_init(&memcg->memsw, NULL);
		page_counter_init(&memcg->kmem, NULL);
		/*
		 * Deeper hierarchy with use_hierarchy == false doesn't make
		 * much sense so let cgroup subsystem know about this
		 * unfortunate state in our controller.
		 */
		if (parent != root_mem_cgroup)
			memory_cgrp_subsys.broken_hierarchy = true;
	}
	mutex_unlock(&memcg_create_mutex);

	ret = memcg_init_kmem(memcg, &memory_cgrp_subsys);
	if (ret)
		return ret;

	/*
	 * Make sure the memcg is initialized: mem_cgroup_iter()
	 * orders reading memcg->initialized against its callers
	 * reading the memcg members.
	 */
	smp_store_release(&memcg->initialized, 1);

	return 0;
}

static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct mem_cgroup_event *event, *tmp;

	/*
	 * Unregister events and notify userspace.
	 * Notify userspace about cgroup removing only after rmdir of cgroup
	 * directory to avoid race between userspace and kernelspace.
	 */
	spin_lock(&memcg->event_list_lock);
	list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
		list_del_init(&event->list);
		schedule_work(&event->remove);
	}
	spin_unlock(&memcg->event_list_lock);

	vmpressure_cleanup(&memcg->vmpressure);

	memcg_deactivate_kmem(memcg);

	wb_memcg_offline(memcg);
}

static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	memcg_destroy_kmem(memcg);
	__mem_cgroup_free(memcg);
}

/**
 * mem_cgroup_css_reset - reset the states of a mem_cgroup
 * @css: the target css
 *
 * Reset the states of the mem_cgroup associated with @css.  This is
 * invoked when the userland requests disabling on the default hierarchy
 * but the memcg is pinned through dependency.  The memcg should stop
 * applying policies and should revert to the vanilla state as it may be
 * made visible again.
 *
 * The current implementation only resets the essential configurations.
 * This needs to be expanded to cover all the visible parts.
 */
static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	mem_cgroup_resize_limit(memcg, PAGE_COUNTER_MAX);
	mem_cgroup_resize_memsw_limit(memcg, PAGE_COUNTER_MAX);
	memcg_update_kmem_limit(memcg, PAGE_COUNTER_MAX);
	memcg->low = 0;
	memcg->high = PAGE_COUNTER_MAX;
	memcg->soft_limit = PAGE_COUNTER_MAX;
	memcg_wb_domain_size_changed(memcg);
}

#ifdef CONFIG_MMU
/* Handlers for move charge at task migration. */
static int mem_cgroup_do_precharge(unsigned long count)
{
	int ret;

	/* Try a single bulk charge without reclaim first */
	ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_WAIT, count);
	if (!ret) {
		mc.precharge += count;
		return ret;
	}
	if (ret == -EINTR) {
		cancel_charge(root_mem_cgroup, count);
		return ret;
	}

	/* Try charges one by one with reclaim */
	while (count--) {
		ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1);
		/*
		 * In case of failure, any residual charges against
		 * mc.to will be dropped by mem_cgroup_clear_mc()
		 * later on.  However, cancel any charges that are
		 * bypassed to root right away or they'll be lost.
		 */
		if (ret == -EINTR)
			cancel_charge(root_mem_cgroup, 1);
		if (ret)
			return ret;
		mc.precharge++;
		cond_resched();
	}
	return 0;
}

/**
 * get_mctgt_type - get target type of moving charge
 * @vma: the vma the pte to be checked belongs
 * @addr: the address corresponding to the pte to be checked
 * @ptent: the pte to be checked
 * @target: the pointer the target page or swap ent will be stored(can be NULL)
 *
 * Returns
 *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
 *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
 *     move charge. if @target is not NULL, the page is stored in target->page
 *     with extra refcnt got(Callers should handle it).
 *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
 *     target for charge migration. if @target is not NULL, the entry is stored
 *     in target->ent.
 *
 * Called with pte lock held.
 */
union mc_target {
	struct page	*page;
	swp_entry_t	ent;
};

enum mc_target_type {
	MC_TARGET_NONE = 0,
	MC_TARGET_PAGE,
	MC_TARGET_SWAP,
};

static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
						unsigned long addr, pte_t ptent)
{
	struct page *page = vm_normal_page(vma, addr, ptent);

	if (!page || !page_mapped(page))
		return NULL;
	if (PageAnon(page)) {
		if (!(mc.flags & MOVE_ANON))
			return NULL;
	} else {
		if (!(mc.flags & MOVE_FILE))
			return NULL;
	}
	if (!get_page_unless_zero(page))
		return NULL;

	return page;
}

#ifdef CONFIG_SWAP
static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	struct page *page = NULL;
	swp_entry_t ent = pte_to_swp_entry(ptent);

	if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
		return NULL;
	/*
	 * Because lookup_swap_cache() updates some statistics counter,
	 * we call find_get_page() with swapper_space directly.
	 */
	page = find_get_page(swap_address_space(ent), ent.val);
	if (do_swap_account)
		entry->val = ent.val;

	return page;
}
#else
static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	return NULL;
}
#endif

static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	struct page *page = NULL;
	struct address_space *mapping;
	pgoff_t pgoff;

	if (!vma->vm_file) /* anonymous vma */
		return NULL;
	if (!(mc.flags & MOVE_FILE))
		return NULL;

	mapping = vma->vm_file->f_mapping;
	pgoff = linear_page_index(vma, addr);

	/* page is moved even if it's not RSS of this task (page-faulted). */
#ifdef CONFIG_SWAP
	/* shmem/tmpfs may report page out on swap: account for that too. */
	if (shmem_mapping(mapping)) {
		page = find_get_entry(mapping, pgoff);
		if (radix_tree_exceptional_entry(page)) {
			swp_entry_t swp = radix_to_swp_entry(page);
			if (do_swap_account)
				*entry = swp;
			page = find_get_page(swap_address_space(swp), swp.val);
		}
	} else
		page = find_get_page(mapping, pgoff);
#else
	page = find_get_page(mapping, pgoff);
#endif
	return page;
}

/**
 * mem_cgroup_move_account - move account of the page
 * @page: the page
 * @nr_pages: number of regular pages (>1 for huge pages)
 * @from: mem_cgroup which the page is moved from.
 * @to:	mem_cgroup which the page is moved to. @from != @to.
 *
 * The caller must confirm following.
 * - page is not on LRU (isolate_page() is useful.)
 * - compound_lock is held when nr_pages > 1
 *
 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
 * from old cgroup.
 */
static int mem_cgroup_move_account(struct page *page,
				   unsigned int nr_pages,
				   struct mem_cgroup *from,
				   struct mem_cgroup *to)
{
	unsigned long flags;
	int ret;
	bool anon;

	VM_BUG_ON(from == to);
	VM_BUG_ON_PAGE(PageLRU(page), page);
	/*
	 * The page is isolated from LRU. So, collapse function
	 * will not handle this page. But page splitting can happen.
	 * Do this check under compound_page_lock(). The caller should
	 * hold it.
	 */
	ret = -EBUSY;
	if (nr_pages > 1 && !PageTransHuge(page))
		goto out;

	/*
	 * Prevent mem_cgroup_migrate() from looking at page->mem_cgroup
	 * of its source page while we change it: page migration takes
	 * both pages off the LRU, but page cache replacement doesn't.
	 */
	if (!trylock_page(page))
		goto out;

	ret = -EINVAL;
	if (page->mem_cgroup != from)
		goto out_unlock;

	anon = PageAnon(page);

	spin_lock_irqsave(&from->move_lock, flags);

	if (!anon && page_mapped(page)) {
		__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
			       nr_pages);
		__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
			       nr_pages);
	}

	/*
	 * move_lock grabbed above and caller set from->moving_account, so
	 * mem_cgroup_update_page_stat() will serialize updates to PageDirty.
	 * So mapping should be stable for dirty pages.
	 */
	if (!anon && PageDirty(page)) {
		struct address_space *mapping = page_mapping(page);

		if (mapping_cap_account_dirty(mapping)) {
			__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_DIRTY],
				       nr_pages);
			__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_DIRTY],
				       nr_pages);
		}
	}

	if (PageWriteback(page)) {
		__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
			       nr_pages);
		__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
			       nr_pages);
	}

	/*
	 * It is safe to change page->mem_cgroup here because the page
	 * is referenced, charged, and isolated - we can't race with
	 * uncharging, charging, migration, or LRU putback.
	 */

	/* caller should have done css_get */
	page->mem_cgroup = to;
	spin_unlock_irqrestore(&from->move_lock, flags);

	ret = 0;

	local_irq_disable();
	mem_cgroup_charge_statistics(to, page, nr_pages);
	memcg_check_events(to, page);
	mem_cgroup_charge_statistics(from, page, -nr_pages);
	memcg_check_events(from, page);
	local_irq_enable();
out_unlock:
	unlock_page(page);
out:
	return ret;
}

static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
		unsigned long addr, pte_t ptent, union mc_target *target)
{
	struct page *page = NULL;
	enum mc_target_type ret = MC_TARGET_NONE;
	swp_entry_t ent = { .val = 0 };

	if (pte_present(ptent))
		page = mc_handle_present_pte(vma, addr, ptent);
	else if (is_swap_pte(ptent))
		page = mc_handle_swap_pte(vma, addr, ptent, &ent);
	else if (pte_none(ptent))
		page = mc_handle_file_pte(vma, addr, ptent, &ent);

	if (!page && !ent.val)
		return ret;
	if (page) {
		/*
		 * Do only a loose check here, without serialization.
		 * mem_cgroup_move_account() checks whether the page is
		 * valid under LRU exclusion.
		 */
		if (page->mem_cgroup == mc.from) {
			ret = MC_TARGET_PAGE;
			if (target)
				target->page = page;
		}
		if (!ret || !target)
			put_page(page);
	}
	/* There is a swap entry and a page doesn't exist or isn't charged */
	if (ent.val && !ret &&
	    mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
		ret = MC_TARGET_SWAP;
		if (target)
			target->ent = ent;
	}
	return ret;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * We don't consider swapping or file mapped pages because THP does not
 * support them for now.
 * Caller should make sure that pmd_trans_huge(pmd) is true.
 */
static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
		unsigned long addr, pmd_t pmd, union mc_target *target)
{
	struct page *page = NULL;
	enum mc_target_type ret = MC_TARGET_NONE;

	page = pmd_page(pmd);
	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
	if (!(mc.flags & MOVE_ANON))
		return ret;
	if (page->mem_cgroup == mc.from) {
		ret = MC_TARGET_PAGE;
		if (target) {
			get_page(page);
			target->page = page;
		}
	}
	return ret;
}
#else
static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
		unsigned long addr, pmd_t pmd, union mc_target *target)
{
	return MC_TARGET_NONE;
}
#endif

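/*
 * First pass of the move-charge walk: count how many charges will be
 * needed so mem_cgroup_precharge_mc() can charge mc.to up front.
 */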
static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
					unsigned long addr, unsigned long end,
					struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
			mc.precharge += HPAGE_PMD_NR;
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		if (get_mctgt_type(vma, addr, *pte, NULL))
			mc.precharge++;	/* increment precharge temporarily */
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	return 0;
}

static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
{
	unsigned long precharge;

	struct mm_walk mem_cgroup_count_precharge_walk = {
		.pmd_entry = mem_cgroup_count_precharge_pte_range,
		.mm = mm,
	};
	down_read(&mm->mmap_sem);
	walk_page_range(0, ~0UL, &mem_cgroup_count_precharge_walk);
	up_read(&mm->mmap_sem);

	precharge = mc.precharge;
	mc.precharge = 0;

	return precharge;
}

static int mem_cgroup_precharge_mc(struct mm_struct *mm)
{
	unsigned long precharge = mem_cgroup_count_precharge(mm);

	VM_BUG_ON(mc.moving_task);
	mc.moving_task = current;
	return mem_cgroup_do_precharge(precharge);
}

/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
static void __mem_cgroup_clear_mc(void)
{
	struct mem_cgroup *from = mc.from;
	struct mem_cgroup *to = mc.to;

	/* we must uncharge all the leftover precharges from mc.to */
	if (mc.precharge) {
		cancel_charge(mc.to, mc.precharge);
		mc.precharge = 0;
	}
	/*
	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
	 * we must uncharge here.
	 */
	if (mc.moved_charge) {
		cancel_charge(mc.from, mc.moved_charge);
		mc.moved_charge = 0;
	}
	/* we must fixup refcnts and charges */
	if (mc.moved_swap) {
		/* uncharge swap account from the old cgroup */
		if (!mem_cgroup_is_root(mc.from))
			page_counter_uncharge(&mc.from->memsw, mc.moved_swap);

		/*
		 * we charged both to->memory and to->memsw, so we
		 * should uncharge to->memory.
		 */
		if (!mem_cgroup_is_root(mc.to))
			page_counter_uncharge(&mc.to->memory, mc.moved_swap);

		css_put_many(&mc.from->css, mc.moved_swap);

		/* we've already done css_get(mc.to) */
		mc.moved_swap = 0;
	}
	memcg_oom_recover(from);
	memcg_oom_recover(to);
	wake_up_all(&mc.waitq);
}

static void mem_cgroup_clear_mc(void)
{
	/*
	 * we must clear moving_task before waking up waiters at the end of
	 * task migration.
	 */
	mc.moving_task = NULL;
	__mem_cgroup_clear_mc();
	spin_lock(&mc.lock);
	mc.from = NULL;
	mc.to = NULL;
	spin_unlock(&mc.lock);
}

static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
				 struct cgroup_taskset *tset)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct mem_cgroup *from;
	struct task_struct *p;
	struct mm_struct *mm;
	unsigned long move_flags;
	int ret = 0;

	/*
	 * We are now committed to this value whatever it is. Changes in this
	 * tunable will only affect upcoming migrations, not the current one.
	 * So we need to save it, and keep it going.
	 */
	move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
	if (!move_flags)
		return 0;

	p = cgroup_taskset_first(tset);
	from = mem_cgroup_from_task(p);

	VM_BUG_ON(from == memcg);

	mm = get_task_mm(p);
	if (!mm)
		return 0;
	/* We move charges only when we move an owner of the mm */
	if (mm->owner == p) {
		VM_BUG_ON(mc.from);
		VM_BUG_ON(mc.to);
		VM_BUG_ON(mc.precharge);
		VM_BUG_ON(mc.moved_charge);
		VM_BUG_ON(mc.moved_swap);

		spin_lock(&mc.lock);
		mc.from = from;
		mc.to = memcg;
		mc.flags = move_flags;
		spin_unlock(&mc.lock);
		/* We set mc.moving_task later */

		ret = mem_cgroup_precharge_mc(mm);
		if (ret)
			mem_cgroup_clear_mc();
	}
	mmput(mm);
	return ret;
}

static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
				     struct cgroup_taskset *tset)
{
	if (mc.to)
		mem_cgroup_clear_mc();
}

static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mm_walk *walk)
{
	int ret = 0;
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte;
	spinlock_t *ptl;
	enum mc_target_type target_type;
	union mc_target target;
	struct page *page;

	/*
	 * We don't take compound_lock() here but no race with splitting thp
	 * happens because:
	 *  - if pmd_trans_huge_lock() returns 1, the relevant thp is not
	 *    under splitting, which means there's no concurrent thp split,
	 *  - if another thread runs into split_huge_page() just after we
	 *    entered this if-block, the thread must wait for page table lock
	 *    to be unlocked in __split_huge_page_splitting(), where the main
	 *    part of thp split is not executed yet.
	 */
	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
		if (mc.precharge < HPAGE_PMD_NR) {
			spin_unlock(ptl);
			return 0;
		}
		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
		if (target_type == MC_TARGET_PAGE) {
			page = target.page;
			if (!isolate_lru_page(page)) {
				if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
							     mc.from, mc.to)) {
					mc.precharge -= HPAGE_PMD_NR;
					mc.moved_charge += HPAGE_PMD_NR;
				}
				putback_lru_page(page);
			}
			put_page(page);
		}
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
retry:
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; addr += PAGE_SIZE) {
		pte_t ptent = *(pte++);
		swp_entry_t ent;

		if (!mc.precharge)
			break;

		switch (get_mctgt_type(vma, addr, ptent, &target)) {
		case MC_TARGET_PAGE:
			page = target.page;
			if (isolate_lru_page(page))
				goto put;
			if (!mem_cgroup_move_account(page, 1, mc.from, mc.to)) {
				mc.precharge--;
				/* we uncharge from mc.from later. */
				mc.moved_charge++;
			}
			putback_lru_page(page);
put:			/* get_mctgt_type() gets the page */
			put_page(page);
			break;
		case MC_TARGET_SWAP:
			ent = target.ent;
			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
				mc.precharge--;
				/* we fixup refcnts and charges later. */
				mc.moved_swap++;
			}
			break;
		default:
			break;
		}
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	if (addr != end) {
		/*
		 * We have consumed all precharges we got in can_attach().
		 * We try charge one by one, but don't do any additional
		 * charges to mc.to if we have failed in charge once in attach()
		 * phase.
		 */
		ret = mem_cgroup_do_precharge(1);
		if (!ret)
			goto retry;
	}

	return ret;
}

static void mem_cgroup_move_charge(struct mm_struct *mm)
{
	struct mm_walk mem_cgroup_move_charge_walk = {
		.pmd_entry = mem_cgroup_move_charge_pte_range,
		.mm = mm,
	};

	lru_add_drain_all();
	/*
	 * Signal mem_cgroup_begin_page_stat() to take the memcg's
	 * move_lock while we're moving its pages to another memcg.
	 * Then wait for already started RCU-only updates to finish.
	 */
	atomic_inc(&mc.from->moving_account);
	synchronize_rcu();
retry:
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		/*
		 * Someone who is holding the mmap_sem might be waiting in
		 * waitq. So we cancel all extra charges, wake up all waiters,
		 * and retry. Because we cancel precharges, we might not be able
		 * to move enough charges, but moving charge is a best-effort
		 * feature anyway, so it wouldn't be a big problem.
		 */
		__mem_cgroup_clear_mc();
		cond_resched();
		goto retry;
	}
	/*
	 * When we have consumed all precharges and failed in doing
	 * additional charge, the page walk just aborts.
	 */
	walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
	up_read(&mm->mmap_sem);
	atomic_dec(&mc.from->moving_account);
}

static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
				 struct cgroup_taskset *tset)
{
	struct task_struct *p = cgroup_taskset_first(tset);
	struct mm_struct *mm = get_task_mm(p);

	if (mm) {
		if (mc.to)
			mem_cgroup_move_charge(mm);
		mmput(mm);
	}
	if (mc.to)
		mem_cgroup_clear_mc();
}
#else	/* !CONFIG_MMU */
static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
				 struct cgroup_taskset *tset)
{
	return 0;
}
static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
				     struct cgroup_taskset *tset)
{
}
static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
				 struct cgroup_taskset *tset)
{
}
#endif

/*
 * Cgroup retains root cgroups across [un]mount cycles making it necessary
 * to verify whether we're attached to the default hierarchy on each mount
 * attempt.
 */
static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
{
	/*
	 * use_hierarchy is forced on the default hierarchy.  cgroup core
	 * guarantees that @root doesn't have any children, so turning it
	 * on for the root memcg is enough.
	 */
	if (cgroup_on_dfl(root_css->cgroup))
		root_mem_cgroup->use_hierarchy = true;
	else
		root_mem_cgroup->use_hierarchy = false;
}

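/* Interface files for the default hierarchy, hooked up via .dfl_cftypes. */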
static u64 memory_current_read(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
	return mem_cgroup_usage(mem_cgroup_from_css(css), false);
}

static int memory_low_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
	unsigned long low = READ_ONCE(memcg->low);

	if (low == PAGE_COUNTER_MAX)
		seq_puts(m, "max\n");
	else
		seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE);

	return 0;
}

static ssize_t memory_low_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long low;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &low);
	if (err)
		return err;

	memcg->low = low;

	return nbytes;
}

static int memory_high_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
	unsigned long high = READ_ONCE(memcg->high);

	if (high == PAGE_COUNTER_MAX)
		seq_puts(m, "max\n");
	else
		seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE);

	return 0;
}

static ssize_t memory_high_write(struct kernfs_open_file *of,
				 char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long high;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &high);
	if (err)
		return err;

	memcg->high = high;

	memcg_wb_domain_size_changed(memcg);
	return nbytes;
}

static int memory_max_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
	unsigned long max = READ_ONCE(memcg->memory.limit);

	if (max == PAGE_COUNTER_MAX)
		seq_puts(m, "max\n");
	else
		seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);

	return 0;
}

static ssize_t memory_max_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long max;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &max);
	if (err)
		return err;

	err = mem_cgroup_resize_limit(memcg, max);
	if (err)
		return err;

	memcg_wb_domain_size_changed(memcg);
	return nbytes;
}

static int memory_events_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));

	seq_printf(m, "low %lu\n", mem_cgroup_read_events(memcg, MEMCG_LOW));
	seq_printf(m, "high %lu\n", mem_cgroup_read_events(memcg, MEMCG_HIGH));
	seq_printf(m, "max %lu\n", mem_cgroup_read_events(memcg, MEMCG_MAX));
	seq_printf(m, "oom %lu\n", mem_cgroup_read_events(memcg, MEMCG_OOM));

	return 0;
}

static struct cftype memory_files[] = {
	{
		.name = "current",
		.read_u64 = memory_current_read,
	},
	{
		.name = "low",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_low_show,
		.write = memory_low_write,
	},
	{
		.name = "high",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_high_show,
		.write = memory_high_write,
	},
	{
		.name = "max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_max_show,
		.write = memory_max_write,
	},
	{
		.name = "events",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_events_show,
	},
	{ }	/* terminate */
};

struct cgroup_subsys memory_cgrp_subsys = {
	.css_alloc = mem_cgroup_css_alloc,
	.css_online = mem_cgroup_css_online,
	.css_offline = mem_cgroup_css_offline,
	.css_free = mem_cgroup_css_free,
	.css_reset = mem_cgroup_css_reset,
	.can_attach = mem_cgroup_can_attach,
	.cancel_attach = mem_cgroup_cancel_attach,
	.attach = mem_cgroup_move_task,
	.bind = mem_cgroup_bind,
	.dfl_cftypes = memory_files,
	.legacy_cftypes = mem_cgroup_legacy_files,
	.early_init = 0,
};

/**
 * mem_cgroup_low - check if memory consumption is below the normal range
 * @root: the highest ancestor to consider
 * @memcg: the memory cgroup to check
 *
 * Returns %true if memory consumption of @memcg, and that of all
 * configurable ancestors up to @root, is below the normal range.
 */
bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return false;

	/*
	 * The toplevel group doesn't have a configurable range, so
	 * it's never low when looked at directly, and it is not
	 * considered an ancestor when assessing the hierarchy.
	 */

	if (memcg == root_mem_cgroup)
		return false;

	if (page_counter_read(&memcg->memory) >= memcg->low)
		return false;

	while (memcg != root) {
		memcg = parent_mem_cgroup(memcg);

		if (memcg == root_mem_cgroup)
			break;

		if (page_counter_read(&memcg->memory) >= memcg->low)
			return false;
	}
	return true;
}

/**
 * mem_cgroup_try_charge - try charging a page
 * @page: page to charge
 * @mm: mm context of the victim
 * @gfp_mask: reclaim mode
 * @memcgp: charged memcg return
 *
 * Try to charge @page to the memcg that @mm belongs to, reclaiming
 * pages according to @gfp_mask if necessary.
 *
 * Returns 0 on success, with *@memcgp pointing to the charged memcg.
 * Otherwise, an error code is returned.
 *
 * After page->mapping has been set up, the caller must finalize the
 * charge with mem_cgroup_commit_charge().  Or abort the transaction
 * with mem_cgroup_cancel_charge() in case page instantiation fails.
 */
int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
			  gfp_t gfp_mask, struct mem_cgroup **memcgp)
{
	struct mem_cgroup *memcg = NULL;
	unsigned int nr_pages = 1;
	int ret = 0;

	if (mem_cgroup_disabled())
		goto out;

	if (PageSwapCache(page)) {
		/*
		 * Every swap fault against a single page tries to charge the
		 * page, bail as early as possible.  shmem_unuse() encounters
		 * already charged pages, too.  The USED bit is protected by
		 * the page lock, which serializes swap cache removal, which
		 * in turn serializes uncharging.
		 */
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		if (page->mem_cgroup)
			goto out;

		if (do_swap_account) {
			swp_entry_t ent = { .val = page_private(page), };
			unsigned short id = lookup_swap_cgroup_id(ent);

			rcu_read_lock();
			memcg = mem_cgroup_from_id(id);
			if (memcg && !css_tryget_online(&memcg->css))
				memcg = NULL;
			rcu_read_unlock();
		}
	}

	if (PageTransHuge(page)) {
		nr_pages <<= compound_order(page);
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
	}

	if (!memcg)
		memcg = get_mem_cgroup_from_mm(mm);

	ret = try_charge(memcg, gfp_mask, nr_pages);

	css_put(&memcg->css);

	if (ret == -EINTR) {
		memcg = root_mem_cgroup;
		ret = 0;
	}
out:
	*memcgp = memcg;
	return ret;
}

/**
 * mem_cgroup_commit_charge - commit a page charge
 * @page: page to charge
 * @memcg: memcg to charge the page to
 * @lrucare: page might be on LRU already
 *
 * Finalize a charge transaction started by mem_cgroup_try_charge(),
 * after page->mapping has been set up.  This must happen atomically
 * as part of the page instantiation, i.e. under the page table lock
 * for anonymous pages, under the page lock for page and swap cache.
 *
 * In addition, the page must not be on the LRU during the commit, to
 * prevent racing with task migration.  If it might be, use @lrucare.
 *
 * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
 */
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
			      bool lrucare)
{
	unsigned int nr_pages = 1;

	VM_BUG_ON_PAGE(!page->mapping, page);
	VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);

	if (mem_cgroup_disabled())
		return;
	/*
	 * Swap faults will attempt to charge the same page multiple
	 * times.  But reuse_swap_page() might have removed the page
	 * from swapcache already, so we can't check PageSwapCache().
	 */
	if (!memcg)
		return;

	commit_charge(page, memcg, lrucare);

	if (PageTransHuge(page)) {
		nr_pages <<= compound_order(page);
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
	}

	local_irq_disable();
	mem_cgroup_charge_statistics(memcg, page, nr_pages);
	memcg_check_events(memcg, page);
	local_irq_enable();

	if (do_swap_account && PageSwapCache(page)) {
		swp_entry_t entry = { .val = page_private(page) };
		/*
		 * The swap entry might not get freed for a long time,
		 * let's not wait for it.  The page already received a
		 * memory+swap charge, drop the swap entry duplicate.
		 */
		mem_cgroup_uncharge_swap(entry);
	}
}

/**
 * mem_cgroup_cancel_charge - cancel a page charge
 * @page: page to charge
 * @memcg: memcg to charge the page to
 *
 * Cancel a charge transaction started by mem_cgroup_try_charge().
 */
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg)
{
	unsigned int nr_pages = 1;

	if (mem_cgroup_disabled())
		return;
	/*
	 * Swap faults will attempt to charge the same page multiple
	 * times.  But reuse_swap_page() might have removed the page
	 * from swapcache already, so we can't check PageSwapCache().
	 */
	if (!memcg)
		return;

	if (PageTransHuge(page)) {
		nr_pages <<= compound_order(page);
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
	}

	cancel_charge(memcg, nr_pages);
}

static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
			   unsigned long nr_anon, unsigned long nr_file,
			   unsigned long nr_huge, struct page *dummy_page)
{
	unsigned long nr_pages = nr_anon + nr_file;
	unsigned long flags;

	if (!mem_cgroup_is_root(memcg)) {
		page_counter_uncharge(&memcg->memory, nr_pages);
		if (do_swap_account)
			page_counter_uncharge(&memcg->memsw, nr_pages);
		memcg_oom_recover(memcg);
	}

	local_irq_save(flags);
	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
	__this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
	memcg_check_events(memcg, dummy_page);
	local_irq_restore(flags);

	if (!mem_cgroup_is_root(memcg))
		css_put_many(&memcg->css, nr_pages);
}

static void uncharge_list(struct list_head *page_list)
{
	struct mem_cgroup *memcg = NULL;
	unsigned long nr_anon = 0;
	unsigned long nr_file = 0;
	unsigned long nr_huge = 0;
	unsigned long pgpgout = 0;
	struct list_head *next;
	struct page *page;

	next = page_list->next;
	do {
		unsigned int nr_pages = 1;

		page = list_entry(next, struct page, lru);
		next = page->lru.next;

		VM_BUG_ON_PAGE(PageLRU(page), page);
		VM_BUG_ON_PAGE(page_count(page), page);

		if (!page->mem_cgroup)
			continue;

		/*
		 * Nobody should be changing or seriously looking at
		 * page->mem_cgroup at this point, we have fully
		 * exclusive access to the page.
		 */

		if (memcg != page->mem_cgroup) {
			if (memcg) {
				uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
					       nr_huge, page);
				pgpgout = nr_anon = nr_file = nr_huge = 0;
			}
			memcg = page->mem_cgroup;
		}

		if (PageTransHuge(page)) {
			nr_pages <<= compound_order(page);
			VM_BUG_ON_PAGE(!PageTransHuge(page), page);
			nr_huge += nr_pages;
		}

		if (PageAnon(page))
			nr_anon += nr_pages;
		else
			nr_file += nr_pages;

		page->mem_cgroup = NULL;

		pgpgout++;
	} while (next != page_list);

	if (memcg)
		uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
			       nr_huge, page);
}

/**
 * mem_cgroup_uncharge - uncharge a page
 * @page: page to uncharge
 *
 * Uncharge a page previously charged with mem_cgroup_try_charge() and
 * mem_cgroup_commit_charge().
 */
void mem_cgroup_uncharge(struct page *page)
{
	if (mem_cgroup_disabled())
		return;

	/* Don't touch page->lru of any random page, pre-check: */
	if (!page->mem_cgroup)
		return;

	INIT_LIST_HEAD(&page->lru);
	uncharge_list(&page->lru);
}

/**
 * mem_cgroup_uncharge_list - uncharge a list of pages
 * @page_list: list of pages to uncharge
 *
 * Uncharge a list of pages previously charged with
 * mem_cgroup_try_charge() and mem_cgroup_commit_charge().
 */
void mem_cgroup_uncharge_list(struct list_head *page_list)
{
	if (mem_cgroup_disabled())
		return;

	if (!list_empty(page_list))
		uncharge_list(page_list);
}
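
/*
 * Illustrative sketch: batching uncharges when freeing many pages at
 * once, in the style of release_pages().  The surrounding loop and
 * the final freeing call are placeholders for whatever the caller
 * does with the pages; only the memcg call is the point here.
 */
#if 0
	LIST_HEAD(pages_to_free);

	/* for each page whose last reference was just dropped: */
	list_add(&page->lru, &pages_to_free);

	mem_cgroup_uncharge_list(&pages_to_free);
	free_hot_cold_page_list(&pages_to_free, cold);
#endif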

/**
 * mem_cgroup_migrate - migrate a charge to another page
 * @oldpage: currently charged page
 * @newpage: page to transfer the charge to
 * @lrucare: either or both pages might be on the LRU already
 *
 * Migrate the charge from @oldpage to @newpage.
 *
 * Both pages must be locked, @newpage->mapping must be set up.
 */
void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
			bool lrucare)
{
	struct mem_cgroup *memcg;
	int isolated;

	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
	VM_BUG_ON_PAGE(!lrucare && PageLRU(oldpage), oldpage);
	VM_BUG_ON_PAGE(!lrucare && PageLRU(newpage), newpage);
	VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
	VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
		       newpage);

	if (mem_cgroup_disabled())
		return;

	/* Page cache replacement: new page already charged? */
	if (newpage->mem_cgroup)
		return;

	/*
	 * Swapcache readahead pages can get migrated before being
	 * charged, and migration from compaction can happen to an
	 * uncharged page when the PFN walker finds a page that
	 * reclaim just put back on the LRU but has not released yet.
	 */
	memcg = oldpage->mem_cgroup;
	if (!memcg)
		return;

	if (lrucare)
		lock_page_lru(oldpage, &isolated);

	oldpage->mem_cgroup = NULL;

	if (lrucare)
		unlock_page_lru(oldpage, isolated);

	commit_charge(newpage, memcg, lrucare);
}
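
/*
 * Usage sketch (illustrative only): a page-cache replacement in the
 * style of replace_page_cache_page(), where the new page inherits the
 * old page's charge.  The radix-tree surgery is elided; lrucare is
 * set because either page may already be on the LRU.
 */
#if 0
static void example_replace_page(struct page *old, struct page *new)
{
	/* caller holds both page locks and has set new->mapping */
	mem_cgroup_migrate(old, new, true);
}
#endif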

/*
 * subsys_initcall() for memory controller.
 *
 * Some parts like hotcpu_notifier() have to be initialized from this context
 * because of lock dependencies (cgroup_lock -> cpu hotplug) but basically
 * everything that doesn't depend on a specific mem_cgroup structure should
 * be initialized from here.
 */
static int __init mem_cgroup_init(void)
{
	int cpu, node;

	hotcpu_notifier(memcg_cpu_hotplug_callback, 0);

	for_each_possible_cpu(cpu)
		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
			  drain_local_stock);

	for_each_node(node) {
		struct mem_cgroup_tree_per_node *rtpn;
		int zone;

		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
				    node_online(node) ? node : NUMA_NO_NODE);

		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
			struct mem_cgroup_tree_per_zone *rtpz;

			rtpz = &rtpn->rb_tree_per_zone[zone];
			rtpz->rb_root = RB_ROOT;
			spin_lock_init(&rtpz->lock);
		}
		soft_limit_tree.rb_tree_per_node[node] = rtpn;
	}

	return 0;
}
subsys_initcall(mem_cgroup_init);

#ifdef CONFIG_MEMCG_SWAP
/**
 * mem_cgroup_swapout - transfer a memsw charge to swap
 * @page: page whose memsw charge to transfer
 * @entry: swap entry to move the charge to
 *
 * Transfer the memsw charge of @page to @entry.
 */
void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
	struct mem_cgroup *memcg;
	unsigned short oldid;

	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(page_count(page), page);

	if (!do_swap_account)
		return;

	memcg = page->mem_cgroup;

	/* Readahead page, never charged */
	if (!memcg)
		return;

	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
	VM_BUG_ON_PAGE(oldid, page);
	mem_cgroup_swap_statistics(memcg, true);

	page->mem_cgroup = NULL;

	if (!mem_cgroup_is_root(memcg))
		page_counter_uncharge(&memcg->memory, 1);

	/*
	 * Interrupts should be disabled here because the caller holds
	 * mapping->tree_lock, which is taken with interrupts off. It is
	 * important to have interrupts disabled because it is the only
	 * synchronisation we have for updating the per-CPU variables.
	 */
	VM_BUG_ON(!irqs_disabled());
	mem_cgroup_charge_statistics(memcg, page, -1);
	memcg_check_events(memcg, page);
}
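
/*
 * Illustrative sketch of the expected calling context: a
 * __remove_mapping()-style reclaim path transfers the charge right
 * before deleting the page from the swap cache, under
 * mapping->tree_lock with interrupts off - which is why no
 * local_irq_save() is needed in mem_cgroup_swapout() itself.
 */
#if 0
	if (PageSwapCache(page)) {
		swp_entry_t swap = { .val = page_private(page) };

		mem_cgroup_swapout(page, swap);
		__delete_from_swap_cache(page);
		spin_unlock_irq(&mapping->tree_lock);
		swapcache_free(swap);
	}
#endif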

/**
 * mem_cgroup_uncharge_swap - uncharge a swap entry
 * @entry: swap entry to uncharge
 *
 * Drop the memsw charge associated with @entry.
 */
void mem_cgroup_uncharge_swap(swp_entry_t entry)
{
	struct mem_cgroup *memcg;
	unsigned short id;

	if (!do_swap_account)
		return;

	id = swap_cgroup_record(entry, 0);
	rcu_read_lock();
	memcg = mem_cgroup_from_id(id);
	if (memcg) {
		if (!mem_cgroup_is_root(memcg))
			page_counter_uncharge(&memcg->memsw, 1);
		mem_cgroup_swap_statistics(memcg, false);
		css_put(&memcg->css);
	}
	rcu_read_unlock();
}

/* Remember the boot option for swap accounting */
#ifdef CONFIG_MEMCG_SWAP_ENABLED
static int really_do_swap_account __initdata = 1;
#else
static int really_do_swap_account __initdata;
#endif

static int __init enable_swap_account(char *s)
{
	if (!strcmp(s, "1"))
		really_do_swap_account = 1;
	else if (!strcmp(s, "0"))
		really_do_swap_account = 0;
	return 1;
}
__setup("swapaccount=", enable_swap_account);
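
/*
 * Example: with CONFIG_MEMCG_SWAP_ENABLED the accounting defaults to
 * on and can be turned off by booting with "swapaccount=0"; without
 * it, booting with "swapaccount=1" enables the accounting.
 */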

static struct cftype memsw_cgroup_files[] = {
	{
		.name = "memsw.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.failcnt",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{ },	/* terminate */
};

static int __init mem_cgroup_swap_init(void)
{
	if (!mem_cgroup_disabled() && really_do_swap_account) {
		do_swap_account = 1;
		WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
						  memsw_cgroup_files));
	}
	return 0;
}
subsys_initcall(mem_cgroup_swap_init);

#endif /* CONFIG_MEMCG_SWAP */