/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/page_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include <net/tcp_memcontrol.h>

#include <asm/uaccess.h>

#include <trace/events/vmscan.h>
struct cgroup_subsys mem_cgroup_subsys __read_mostly;
EXPORT_SYMBOL(mem_cgroup_subsys);

#define MEM_CGROUP_RECLAIM_RETRIES	5
static struct mem_cgroup *root_mem_cgroup __read_mostly;

#ifdef CONFIG_MEMCG_SWAP
/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
int do_swap_account __read_mostly;

/* for remembering the boot option */
#ifdef CONFIG_MEMCG_SWAP_ENABLED
static int really_do_swap_account __initdata = 1;
#else
static int really_do_swap_account __initdata = 0;
#endif

#else
#define do_swap_account		0
#endif


/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,		/* # of pages charged as anon rss */
	MEM_CGROUP_STAT_RSS_HUGE,	/* # of pages charged as anon huge */
	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
	MEM_CGROUP_STAT_SWAP,		/* # of pages, swapped out */
	MEM_CGROUP_STAT_NSTATS,
};

static const char * const mem_cgroup_stat_names[] = {
	"cache",
	"rss",
	"rss_huge",
	"mapped_file",
	"swap",
};

enum mem_cgroup_events_index {
	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
	MEM_CGROUP_EVENTS_PGFAULT,	/* # of page-faults */
	MEM_CGROUP_EVENTS_PGMAJFAULT,	/* # of major page-faults */
	MEM_CGROUP_EVENTS_NSTATS,
};

static const char * const mem_cgroup_events_names[] = {
	"pgpgin",
	"pgpgout",
	"pgfault",
	"pgmajfault",
};

static const char * const mem_cgroup_lru_names[] = {
	"inactive_anon",
	"active_anon",
	"inactive_file",
	"active_file",
	"unevictable",
};

/*
 * Per memcg event counter is incremented at every pagein/pageout. With THP,
 * it will be incremented by the number of pages. This counter is used
 * to trigger some periodic events. This is straightforward and better
 * than using jiffies etc. to handle periodic memcg events.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_NUMAINFO,
	MEM_CGROUP_NTARGETS,
};
#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024
#define NUMAINFO_EVENTS_TARGET	1024

struct mem_cgroup_stat_cpu {
	long count[MEM_CGROUP_STAT_NSTATS];
	unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct mem_cgroup_reclaim_iter {
	/*
	 * last scanned hierarchy member. Valid only if last_dead_count
	 * matches memcg->dead_count of the hierarchy root group.
	 */
	struct mem_cgroup *last_visited;
	unsigned long last_dead_count;

	/* scan generation, increased every round-trip */
	unsigned int generation;
};

/*
 * per-zone information in memory controller.
 */
struct mem_cgroup_per_zone {
	struct lruvec		lruvec;
	unsigned long		lru_size[NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];

	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
						/* use container_of	   */
};

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	u64 threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[0];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe we could even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * the counter to account for mem+swap usage.
	 */
	struct res_counter memsw;

	/*
	 * the counter to account for kernel memory usage.
	 */
	struct res_counter kmem;
	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;
	unsigned long kmem_account_flags; /* See KMEM_ACCOUNTED_*, below */

	bool		oom_lock;
	atomic_t	under_oom;

	int	swappiness;
	/* OOM-Killer disable */
	int		oom_kill_disable;

	/* set when res.limit == memsw.limit */
	bool		memsw_is_minimum;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup ? And what type of charges should we move ?
	 */
	unsigned long	move_charge_at_immigrate;
	/*
	 * set > 0 if pages under this cgroup are moving to other cgroup.
	 */
	atomic_t	moving_account;
	/* taken only while moving_account > 0 */
	spinlock_t	move_lock;
	/*
	 * percpu counter.
	 */
	struct mem_cgroup_stat_cpu __percpu *stat;
	/*
	 * used when a cpu is offlined or other synchronizations
	 * See mem_cgroup_read_stat().
	 */
	struct mem_cgroup_stat_cpu nocpu_base;
	spinlock_t pcp_counter_lock;

	atomic_t	dead_count;
#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
	struct tcp_memcontrol tcp_mem;
#endif
#if defined(CONFIG_MEMCG_KMEM)
	/* analogous to slab_common's slab_caches list. per-memcg */
	struct list_head memcg_slab_caches;
	/* Not a spinlock, we can take a lot of time walking the list */
	struct mutex slab_caches_mutex;
	/* Index in the kmem_cache->memcg_params->memcg_caches array */
	int kmemcg_id;
#endif

	int last_scanned_node;
#if MAX_NUMNODES > 1
	nodemask_t	scan_nodes;
	atomic_t	numainfo_events;
	atomic_t	numainfo_updating;
#endif

	struct mem_cgroup_per_node *nodeinfo[0];
	/* WARNING: nodeinfo must be the last member here */
};

static size_t memcg_size(void)
{
	return sizeof(struct mem_cgroup) +
		nr_node_ids * sizeof(struct mem_cgroup_per_node);
}

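/*
 * Illustrative sizing note (not in the original source): with
 * nr_node_ids == 1, memcg_size() is just sizeof(struct mem_cgroup)
 * plus one struct mem_cgroup_per_node, because nodeinfo[] is a
 * zero-length array at the tail of struct mem_cgroup and its per-node
 * entries are allocated together with the struct itself.
 */
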
/* internal only representation about the status of kmem accounting. */
enum {
	KMEM_ACCOUNTED_ACTIVE = 0, /* accounted by this cgroup itself */
	KMEM_ACCOUNTED_ACTIVATED, /* static key enabled. */
	KMEM_ACCOUNTED_DEAD, /* dead memcg with pending kmem charges */
};

/* We account when limit is on, but only after call sites are patched */
#define KMEM_ACCOUNTED_MASK \
		((1 << KMEM_ACCOUNTED_ACTIVE) | (1 << KMEM_ACCOUNTED_ACTIVATED))

#ifdef CONFIG_MEMCG_KMEM
static inline void memcg_kmem_set_active(struct mem_cgroup *memcg)
{
	set_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
}

static bool memcg_kmem_is_active(struct mem_cgroup *memcg)
{
	return test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
}

static void memcg_kmem_set_activated(struct mem_cgroup *memcg)
{
	set_bit(KMEM_ACCOUNTED_ACTIVATED, &memcg->kmem_account_flags);
}

static void memcg_kmem_clear_activated(struct mem_cgroup *memcg)
{
	clear_bit(KMEM_ACCOUNTED_ACTIVATED, &memcg->kmem_account_flags);
}

static void memcg_kmem_mark_dead(struct mem_cgroup *memcg)
{
	/*
	 * Our caller must use css_get() first, because memcg_uncharge_kmem()
	 * will call css_put() if it sees the memcg is dead.
	 */
	smp_wmb();
	if (test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags))
		set_bit(KMEM_ACCOUNTED_DEAD, &memcg->kmem_account_flags);
}

static bool memcg_kmem_test_and_clear_dead(struct mem_cgroup *memcg)
{
	return test_and_clear_bit(KMEM_ACCOUNTED_DEAD,
				  &memcg->kmem_account_flags);
}
#endif

/* Stuffs for move charges at task migration. */
/*
 * Types of charges to be moved. "move_charge_at_immigrate" and
 * "immigrate_flags" are treated as a left-shifted bitmap of these types.
 */
enum move_type {
	MOVE_CHARGE_TYPE_ANON,	/* private anonymous page and swap of it */
	MOVE_CHARGE_TYPE_FILE,	/* file page (including tmpfs) and swap of it */
	NR_MOVE_TYPE,
};

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long immigrate_flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

static bool move_anon(void)
{
	return test_bit(MOVE_CHARGE_TYPE_ANON, &mc.immigrate_flags);
}

static bool move_file(void)
{
	return test_bit(MOVE_CHARGE_TYPE_FILE, &mc.immigrate_flags);
}

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_ANON,
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for OOM notifier */
#define OOM_CONTROL		(0)

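/*
 * Illustrative encode/decode of the cft->private scheme above (the
 * exact file wiring lives further down in this file): a limit file for
 * the plain memory counter would use MEMFILE_PRIVATE(_MEM, RES_LIMIT),
 * and MEMFILE_TYPE()/MEMFILE_ATTR() recover _MEM and RES_LIMIT from
 * that packed value.
 */
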
/*
 * Reclaim flags for mem_cgroup_hierarchical_reclaim
 */
#define MEM_CGROUP_RECLAIM_NOSWAP_BIT	0x0
#define MEM_CGROUP_RECLAIM_NOSWAP	(1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
#define MEM_CGROUP_RECLAIM_SHRINK_BIT	0x1
#define MEM_CGROUP_RECLAIM_SHRINK	(1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)

/*
 * The memcg_create_mutex will be held whenever a new cgroup is created.
 * As a consequence, any change that needs to protect against new child cgroups
 * appearing has to hold it as well.
 */
static DEFINE_MUTEX(memcg_create_mutex);

struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *s)
{
	return s ? container_of(s, struct mem_cgroup, css) : NULL;
}

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

struct vmpressure *css_to_vmpressure(struct cgroup_subsys_state *css)
{
	return &mem_cgroup_from_css(css)->vmpressure;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

/* Writing them here to avoid exposing memcg's inner layout */
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)

void sock_update_memcg(struct sock *sk)
{
	if (mem_cgroup_sockets_enabled) {
		struct mem_cgroup *memcg;
		struct cg_proto *cg_proto;

		BUG_ON(!sk->sk_prot->proto_cgroup);

		/* Socket cloning can throw us here with sk_cgrp already
		 * filled. It won't however, necessarily happen from
		 * process context. So the test for root memcg given
		 * the current task's memcg won't help us in this case.
		 *
		 * Respecting the original socket's memcg is a better
		 * decision in this case.
		 */
		if (sk->sk_cgrp) {
			BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
			css_get(&sk->sk_cgrp->memcg->css);
			return;
		}

		rcu_read_lock();
		memcg = mem_cgroup_from_task(current);
		cg_proto = sk->sk_prot->proto_cgroup(memcg);
		if (!mem_cgroup_is_root(memcg) &&
		    memcg_proto_active(cg_proto) && css_tryget(&memcg->css)) {
			sk->sk_cgrp = cg_proto;
		}
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(sock_update_memcg);

void sock_release_memcg(struct sock *sk)
{
	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
		struct mem_cgroup *memcg;
		WARN_ON(!sk->sk_cgrp->memcg);
		memcg = sk->sk_cgrp->memcg;
		css_put(&sk->sk_cgrp->memcg->css);
	}
}

struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg || mem_cgroup_is_root(memcg))
		return NULL;

	return &memcg->tcp_mem.cg_proto;
}
EXPORT_SYMBOL(tcp_proto_cgroup);

static void disarm_sock_keys(struct mem_cgroup *memcg)
{
	if (!memcg_proto_activated(&memcg->tcp_mem.cg_proto))
		return;
	static_key_slow_dec(&memcg_socket_limit_enabled);
}
#else
static void disarm_sock_keys(struct mem_cgroup *memcg)
{
}
#endif

#ifdef CONFIG_MEMCG_KMEM
/*
 * This will be the memcg's index in each cache's ->memcg_params->memcg_caches.
 * There are two main reasons for not using the css_id for this:
 *  1) this works better in sparse environments, where we have a lot of memcgs,
 *     but only a few kmem-limited. Or also, if we have, for instance, 200
 *     memcgs, and none but the 200th is kmem-limited, we'd have to have a
 *     200 entry array for that.
 *
 *  2) In order not to violate the cgroup API, we would like to do all memory
 *     allocation in ->create(). At that point, we haven't yet allocated the
 *     css_id. Having a separate index prevents us from messing with the cgroup
 *     core for this
 *
 * The current size of the caches array is stored in
 * memcg_limited_groups_array_size.  It will double each time we have to
 * increase it.
 */
static DEFINE_IDA(kmem_limited_groups);
int memcg_limited_groups_array_size;

/*
 * MIN_SIZE is different than 1, because we would like to avoid going through
 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 * cgroups is a reasonable guess. In the future, it could be a parameter or
 * tunable, but that is strictly not necessary.
 *
 * MAX_SIZE should be as large as the number of css_ids. Ideally, we could get
 * this constant directly from cgroup, but it is understandable that this is
 * better kept as an internal representation in cgroup.c. In any case, the
 * css_id space is not getting any smaller, and we don't have to necessarily
 * increase ours as well if it increases.
 */
#define MEMCG_CACHES_MIN_SIZE 4
#define MEMCG_CACHES_MAX_SIZE 65535

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
 * conditional to this static branch, we'll have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
struct static_key memcg_kmem_enabled_key;
EXPORT_SYMBOL(memcg_kmem_enabled_key);

static void disarm_kmem_keys(struct mem_cgroup *memcg)
{
	if (memcg_kmem_is_active(memcg)) {
		static_key_slow_dec(&memcg_kmem_enabled_key);
		ida_simple_remove(&kmem_limited_groups, memcg->kmemcg_id);
	}
	/*
	 * This check can't live in kmem destruction function,
	 * since the charges will outlive the cgroup
	 */
	WARN_ON(res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0);
}
#else
static void disarm_kmem_keys(struct mem_cgroup *memcg)
{
}
#endif /* CONFIG_MEMCG_KMEM */

static void disarm_static_keys(struct mem_cgroup *memcg)
{
	disarm_sock_keys(memcg);
	disarm_kmem_keys(memcg);
}

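/*
 * Illustrative note (not from the original comments): both helpers
 * above drop references on static branches that are patched into hot
 * paths elsewhere (memcg_kmem_enabled_key for kmem accounting, the
 * socket limit key for tcp buffer accounting), so a key only stays
 * enabled while at least one memcg still uses the feature.
 */
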
static void drain_all_stock_async(struct mem_cgroup *memcg);

static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid)
{
	VM_BUG_ON((unsigned)nid >= nr_node_ids);
	return &memcg->nodeinfo[nid]->zoneinfo[zid];
}

struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
{
	return &memcg->css;
}

static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return mem_cgroup_zoneinfo(memcg, nid, zid);
}

/*
 * Implementation Note: reading percpu statistics for memcg.
 *
 * Both vmstat[] and percpu_counter have thresholds and do periodic
 * synchronization to implement "quick" reads. There is a trade-off between
 * reading cost and precision of value. Then, we may have a chance to implement
 * a periodic synchronization of counters in memcg's counter.
 *
 * But this _read() function is used for the user interface now. The user
 * accounts memory usage by memory cgroup and _always_ requires an exact value
 * because he accounts memory. Even if we provide quick-and-fuzzy reads, we
 * always have to visit all online cpus and make the sum. So, for now,
 * unnecessary synchronization is not implemented. (just implemented for cpu
 * hotplug)
 *
 * If there are kernel internal actions which can make use of some not-exact
 * value, and reading all cpu values can be a performance bottleneck in some
 * common workload, threshold and synchronization as in vmstat[] should be
 * implemented.
 */
static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
				 enum mem_cgroup_stat_index idx)
{
	long val = 0;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		val += per_cpu(memcg->stat->count[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&memcg->pcp_counter_lock);
	val += memcg->nocpu_base.count[idx];
	spin_unlock(&memcg->pcp_counter_lock);
#endif
	put_online_cpus();
	return val;
}

static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
					 bool charge)
{
	int val = (charge) ? 1 : -1;
	this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
}

static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
					    enum mem_cgroup_events_index idx)
{
	unsigned long val = 0;
	int cpu;

	for_each_online_cpu(cpu)
		val += per_cpu(memcg->stat->events[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&memcg->pcp_counter_lock);
	val += memcg->nocpu_base.events[idx];
	spin_unlock(&memcg->pcp_counter_lock);
#endif
	return val;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 struct page *page,
					 bool anon, int nr_pages)
{
	preempt_disable();

	/*
	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
	 * counted as CACHE even if it's on ANON LRU.
	 */
	if (anon)
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
				nr_pages);
	else
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
				nr_pages);

	if (PageTransHuge(page))
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
				nr_pages);

	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
	else {
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);

	preempt_enable();
}

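/*
 * Worked example for the statistics above (illustrative, assuming
 * x86-64 with 4KB pages): charging an anonymous transparent hugepage
 * reaches here with anon == true and nr_pages == 512, so RSS and
 * RSS_HUGE each grow by 512 pages while PGPGIN is incremented just
 * once, per the "ignore page size" rule.
 */
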
unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	struct mem_cgroup_per_zone *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
	return mz->lru_size[lru];
}

static unsigned long
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
			unsigned int lru_mask)
{
	struct mem_cgroup_per_zone *mz;
	enum lru_list lru;
	unsigned long ret = 0;

	mz = mem_cgroup_zoneinfo(memcg, nid, zid);

	for_each_lru(lru) {
		if (BIT(lru) & lru_mask)
			ret += mz->lru_size[lru];
	}
	return ret;
}

static unsigned long
mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
			int nid, unsigned int lru_mask)
{
	u64 total = 0;
	int zid;

	for (zid = 0; zid < MAX_NR_ZONES; zid++)
		total += mem_cgroup_zone_nr_lru_pages(memcg,
						nid, zid, lru_mask);

	return total;
}

static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
			unsigned int lru_mask)
{
	int nid;
	u64 total = 0;

	for_each_node_state(nid, N_MEMORY)
		total += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
	return total;
}

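/*
 * Illustrative use of the lru_mask parameter (not a call made here):
 * mem_cgroup_nr_lru_pages(memcg, LRU_ALL_FILE) sums the inactive and
 * active file lists over every node and zone, since LRU_ALL_FILE is
 * the bitmask BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE).
 */
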
static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->stat->nr_page_events);
	next = __this_cpu_read(memcg->stat->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)next - (long)val < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_NUMAINFO:
			next = val + NUMAINFO_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->stat->targets[target], next);
		return true;
	}
	return false;
}

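/*
 * Worked example of the ratelimit above (illustrative): with a stored
 * target of 128 and THRESHOLDS_EVENTS_TARGET == 128, the first call
 * that sees nr_page_events reach 130 returns true and advances the
 * target to 258, so the expensive threshold check runs roughly once
 * per 128 page events on each cpu.
 */
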
/*
 * Check events in order.
 *
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
	preempt_disable();
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_numainfo __maybe_unused;

#if MAX_NUMNODES > 1
		do_numainfo = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_NUMAINFO);
#endif
		preempt_enable();

		mem_cgroup_threshold(memcg);
#if MAX_NUMNODES > 1
		if (unlikely(do_numainfo))
			atomic_inc(&memcg->numainfo_events);
#endif
	} else
		preempt_enable();
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, mem_cgroup_subsys_id));
}

struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg = NULL;

	if (!mm)
		return NULL;
	/*
	 * Because we have no locks, mm->owner's may be being moved to other
	 * cgroup. We use css_tryget() here even if this looks
	 * pessimistic (rather than adding locks here).
	 */
	rcu_read_lock();
	do {
		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
		if (unlikely(!memcg))
			break;
	} while (!css_tryget(&memcg->css));
	rcu_read_unlock();
	return memcg;
}

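/*
 * Callers own the css reference taken above and must drop it when
 * done. Minimal usage sketch (not a caller in this file):
 *
 *	memcg = try_get_mem_cgroup_from_mm(mm);
 *	if (memcg) {
 *		... use memcg ...
 *		css_put(&memcg->css);
 *	}
 */
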
/*
 * Returns a next (in a pre-order walk) alive memcg (with elevated css
 * ref. count) or NULL if the whole root's subtree has been visited.
 *
 * helper function to be used by mem_cgroup_iter
 */
static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,
		struct mem_cgroup *last_visited)
{
	struct cgroup_subsys_state *prev_css, *next_css;

	prev_css = last_visited ? &last_visited->css : NULL;
skip_node:
	next_css = css_next_descendant_pre(prev_css, &root->css);

	/*
	 * Even if we found a group we have to make sure it is
	 * alive. css && !memcg means that the groups should be
	 * skipped and we should continue the tree walk.
	 * last_visited css is safe to use because it is
	 * protected by css_get and the tree walk is rcu safe.
	 */
	if (next_css) {
		struct mem_cgroup *mem = mem_cgroup_from_css(next_css);

		if (css_tryget(&mem->css))
			return mem;
		else {
			prev_css = next_css;
			goto skip_node;
		}
	}

	return NULL;
}

static void mem_cgroup_iter_invalidate(struct mem_cgroup *root)
{
	/*
	 * When a group in the hierarchy below root is destroyed, the
	 * hierarchy iterator can no longer be trusted since it might
	 * have pointed to the destroyed group.  Invalidate it.
	 */
	atomic_inc(&root->dead_count);
}

static struct mem_cgroup *
mem_cgroup_iter_load(struct mem_cgroup_reclaim_iter *iter,
		     struct mem_cgroup *root,
		     int *sequence)
{
	struct mem_cgroup *position = NULL;
	/*
	 * A cgroup destruction happens in two stages: offlining and
	 * release.  They are separated by a RCU grace period.
	 *
	 * If the iterator is valid, we may still race with an
	 * offlining.  The RCU lock ensures the object won't be
	 * released, tryget will fail if we lost the race.
	 */
	*sequence = atomic_read(&root->dead_count);
	if (iter->last_dead_count == *sequence) {
		smp_rmb();
		position = iter->last_visited;
		if (position && !css_tryget(&position->css))
			position = NULL;
	}
	return position;
}

static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter,
				   struct mem_cgroup *last_visited,
				   struct mem_cgroup *new_position,
				   int sequence)
{
	if (last_visited)
		css_put(&last_visited->css);
	/*
	 * We store the sequence count from the time @last_visited was
	 * loaded successfully instead of rereading it here so that we
	 * don't lose destruction events in between.  We could have
	 * raced with the destruction of @new_position after all.
	 */
	iter->last_visited = new_position;
	smp_wmb();
	iter->last_dead_count = sequence;
}

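/*
 * Note on the pair above (a sketch of the protocol, not extra
 * locking): mem_cgroup_iter_load() only trusts iter->last_visited
 * while the cached last_dead_count still matches the root's
 * dead_count, and mem_cgroup_iter_update() publishes the new position
 * before storing the sequence it was loaded under; the smp_rmb() and
 * smp_wmb() order those two accesses against each other.
 */
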
/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a zone and a priority level in @reclaim to
 * divide up the memcgs in the hierarchy among all concurrent
 * reclaimers operating on the same zone and priority.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *last_visited = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	if (prev && !reclaim)
		last_visited = prev;

	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
			goto out_css_put;
		return root;
	}

	rcu_read_lock();
	while (!memcg) {
		struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
		int uninitialized_var(seq);

		if (reclaim) {
			int nid = zone_to_nid(reclaim->zone);
			int zid = zone_idx(reclaim->zone);
			struct mem_cgroup_per_zone *mz;

			mz = mem_cgroup_zoneinfo(root, nid, zid);
			iter = &mz->reclaim_iter[reclaim->priority];
			if (prev && reclaim->generation != iter->generation) {
				iter->last_visited = NULL;
				goto out_unlock;
			}

			last_visited = mem_cgroup_iter_load(iter, root, &seq);
		}

		memcg = __mem_cgroup_iter_next(root, last_visited);

		if (reclaim) {
			mem_cgroup_iter_update(iter, last_visited, memcg, seq);

			if (!memcg)
				iter->generation++;
			else if (!prev && memcg)
				reclaim->generation = iter->generation;
		}

		if (prev && !memcg)
			goto out_unlock;
	}
out_unlock:
	rcu_read_unlock();
out_css_put:
	if (prev && prev != root)
		css_put(&prev->css);

	return memcg;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

/*
 * Iteration constructs for visiting all cgroups (under a tree).  If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))

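/*
 * Early-exit sketch for the iterators above (should_stop() is a
 * hypothetical predicate, not a function in this file):
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (should_stop(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 *
 * The break alone would leak the css reference held on @iter, which
 * is why mem_cgroup_iter_break() must run first.
 */
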
void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (unlikely(!memcg))
		goto out;

	switch (idx) {
	case PGFAULT:
		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
		break;
	case PGMAJFAULT:
		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
		break;
	default:
		BUG();
	}
out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(__mem_cgroup_count_vm_event);

/**
 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
 * @zone: zone of the wanted lruvec
 * @memcg: memcg of the wanted lruvec
 *
 * Returns the lru list vector holding pages for the given @zone and
 * @memcg.  This can be the global zone lruvec, if the memory controller
 * is disabled.
 */
struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
				      struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_zone *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &zone->lruvec;
		goto out;
	}

	mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone));
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->zone here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->zone != zone))
		lruvec->zone = zone;
	return lruvec;
}

/*
 * Following LRU functions are allowed to be used without PCG_LOCK.
 * Operations are called by routine of global LRU independently from memcg.
 * What we have to take care of here is validness of pc->mem_cgroup.
 *
 * Changes to pc->mem_cgroup happens when
 * 1. charge
 * 2. moving account
 * In typical case, "charge" is done before add-to-lru. Exception is SwapCache.
 * It is added to LRU before charge.
 * If PCG_USED bit is not set, page_cgroup is not added to this private LRU.
 * When moving account, the page is not on LRU. It's isolated.
 */

/**
 * mem_cgroup_page_lruvec - return lruvec for adding an lru page
 * @page: the page
 * @zone: zone of the page
 */
struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
{
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup *memcg;
	struct page_cgroup *pc;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &zone->lruvec;
		goto out;
	}

	pc = lookup_page_cgroup(page);
	memcg = pc->mem_cgroup;

	/*
	 * Surreptitiously switch any uncharged offlist page to root:
	 * an uncharged page off lru does nothing to secure
	 * its former mem_cgroup from sudden removal.
	 *
	 * Our caller holds lru_lock, and PageCgroupUsed is updated
	 * under page_cgroup lock: between them, they make all uses
	 * of pc->mem_cgroup safe.
	 */
	if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup)
		pc->mem_cgroup = memcg = root_mem_cgroup;

	mz = page_cgroup_zoneinfo(memcg, page);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->zone here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->zone != zone))
		lruvec->zone = zone;
	return lruvec;
}

/**
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @nr_pages: positive when adding or negative when removing
 *
 * This function must be called when a page is added to or removed from an
 * lru list.
 */
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int nr_pages)
{
	struct mem_cgroup_per_zone *mz;
	unsigned long *lru_size;

	if (mem_cgroup_disabled())
		return;

	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
	lru_size = mz->lru_size + lru;
	*lru_size += nr_pages;
	VM_BUG_ON((long)(*lru_size) < 0);
}

/*
 * Checks whether given mem is same or in the root_mem_cgroup's
 * hierarchy subtree
 */
bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
				  struct mem_cgroup *memcg)
{
	if (root_memcg == memcg)
		return true;
	if (!root_memcg->use_hierarchy || !memcg)
		return false;
	return css_is_ancestor(&memcg->css, &root_memcg->css);
}

static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
				       struct mem_cgroup *memcg)
{
	bool ret;

	rcu_read_lock();
	ret = __mem_cgroup_same_or_subtree(root_memcg, memcg);
	rcu_read_unlock();
	return ret;
}

bool task_in_mem_cgroup(struct task_struct *task,
			const struct mem_cgroup *memcg)
{
	struct mem_cgroup *curr = NULL;
	struct task_struct *p;
	bool ret;

	p = find_lock_task_mm(task);
	if (p) {
		curr = try_get_mem_cgroup_from_mm(p->mm);
		task_unlock(p);
	} else {
		/*
		 * All threads may have already detached their mm's, but the oom
		 * killer still needs to detect if they have already been oom
		 * killed to prevent needlessly killing additional tasks.
		 */
		rcu_read_lock();
		curr = mem_cgroup_from_task(task);
		if (curr)
			css_get(&curr->css);
		rcu_read_unlock();
	}
	if (!curr)
		return false;
	/*
	 * We should check use_hierarchy of "memcg" not "curr". Because checking
	 * use_hierarchy of "curr" here make this function true if hierarchy is
	 * enabled in "curr" and "curr" is a child of "memcg" in *cgroup*
	 * hierarchy(even if use_hierarchy is disabled in "memcg").
	 */
	ret = mem_cgroup_same_or_subtree(memcg, curr);
	css_put(&curr->css);
	return ret;
}

int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
{
	unsigned long inactive_ratio;
	unsigned long inactive;
	unsigned long active;
	unsigned long gb;

	inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
	active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);

	gb = (inactive + active) >> (30 - PAGE_SHIFT);
	if (gb)
		inactive_ratio = int_sqrt(10 * gb);
	else
		inactive_ratio = 1;

	return inactive * inactive_ratio < active;
}

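/*
 * Worked example for the ratio above (illustrative): with 4GB of
 * anonymous memory on this lruvec, gb == 4 and inactive_ratio ==
 * int_sqrt(40) == 6, so the inactive anon list is considered low once
 * six times its size is still smaller than the active list.
 */
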
#define mem_cgroup_from_res_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @memcg can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
	unsigned long long margin;

	margin = res_counter_margin(&memcg->res);
	if (do_swap_account)
		margin = min(margin, res_counter_margin(&memcg->memsw));
	return margin >> PAGE_SHIFT;
}

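/*
 * Worked example (illustrative, 4KB pages): with res.limit == 100MB
 * and res.usage == 60MB, res_counter_margin() returns 40MB and this
 * function reports 10240 pages; with swap accounting enabled the
 * tighter of the res and memsw margins wins via min().
 */
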
int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	/* root ? */
	if (!css_parent(&memcg->css))
		return vm_swappiness;

	return memcg->swappiness;
}

/*
 * memcg->moving_account is used for checking possibility that some thread is
 * calling move_account(). When a thread on CPU-A starts moving pages under
 * a memcg, other threads should check memcg->moving_account under
 * rcu_read_lock(), like this:
 *
 *         CPU-A                                    CPU-B
 *                                              rcu_read_lock()
 *         memcg->moving_account+1              if (memcg->moving_account)
 *                                                   take heavy locks.
 *         synchronize_rcu()                    update something.
 *                                              rcu_read_unlock()
 *         start move here.
 */

/* for quick checking without looking up memcg */
atomic_t memcg_moving __read_mostly;

static void mem_cgroup_start_move(struct mem_cgroup *memcg)
{
	atomic_inc(&memcg_moving);
	atomic_inc(&memcg->moving_account);
	synchronize_rcu();
}

static void mem_cgroup_end_move(struct mem_cgroup *memcg)
{
	/*
	 * Now, mem_cgroup_clear_mc() may call this function with NULL.
	 * We check NULL in callee rather than caller.
	 */
	if (memcg) {
		atomic_dec(&memcg_moving);
		atomic_dec(&memcg->moving_account);
	}
}

/*
 * 2 routines for checking "mem" is under move_account() or not.
 *
 * mem_cgroup_stolen() -  checking whether a cgroup is mc.from or not. This
 *			  is used for avoiding races in accounting.  If true,
 *			  pc->mem_cgroup may be overwritten.
 *
 * mem_cgroup_under_move() - checking a cgroup is mc.from or mc.to or
 *			  under hierarchy of moving cgroups. This is for
 *			  waiting at high memory pressure caused by "move".
 */

static bool mem_cgroup_stolen(struct mem_cgroup *memcg)
{
	VM_BUG_ON(!rcu_read_lock_held());
	return atomic_read(&memcg->moving_account) > 0;
}

static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	bool ret = false;
	/*
	 * Unlike task_move routines, we access mc.to, mc.from not under
	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;

	ret = mem_cgroup_same_or_subtree(memcg, from)
		|| mem_cgroup_same_or_subtree(memcg, to);
unlock:
	spin_unlock(&mc.lock);
	return ret;
}

static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
{
	if (mc.moving_task && current != mc.moving_task) {
		if (mem_cgroup_under_move(memcg)) {
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}

/*
 * Take this lock when
 * - a code tries to modify page's memcg while it's USED.
 * - a code tries to modify page state accounting in a memcg.
 * see mem_cgroup_stolen(), too.
 */
static void move_lock_mem_cgroup(struct mem_cgroup *memcg,
				  unsigned long *flags)
{
	spin_lock_irqsave(&memcg->move_lock, *flags);
}

static void move_unlock_mem_cgroup(struct mem_cgroup *memcg,
				unsigned long *flags)
{
	spin_unlock_irqrestore(&memcg->move_lock, *flags);
}

#define K(x) ((x) << (PAGE_SHIFT-10))
/**
 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
	struct cgroup *task_cgrp;
	struct cgroup *mem_cgrp;
	/*
	 * Need a buffer in BSS, can't rely on allocations. The code relies
	 * on the assumption that OOM is serialized for memory controller.
	 * If this assumption is broken, revisit this code.
	 */
	static char memcg_name[PATH_MAX];
	int ret;
	struct mem_cgroup *iter;
	unsigned int i;

	if (!p)
		return;

	rcu_read_lock();

	mem_cgrp = memcg->css.cgroup;
	task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);

	ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
	if (ret < 0) {
		/*
		 * Unfortunately, we are unable to convert to a useful name
		 * But we'll still print out the usage information
		 */
		rcu_read_unlock();
		goto done;
	}
	rcu_read_unlock();

	pr_info("Task in %s killed", memcg_name);

	rcu_read_lock();
	ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
	if (ret < 0) {
		rcu_read_unlock();
		goto done;
	}
	rcu_read_unlock();

	/*
	 * Continues from above, so we don't need a KERN_ level
	 */
	pr_cont(" as a result of limit of %s\n", memcg_name);
done:

	pr_info("memory: usage %llukB, limit %llukB, failcnt %llu\n",
		res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
		res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
		res_counter_read_u64(&memcg->res, RES_FAILCNT));
	pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %llu\n",
		res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
		res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
		res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
	pr_info("kmem: usage %llukB, limit %llukB, failcnt %llu\n",
		res_counter_read_u64(&memcg->kmem, RES_USAGE) >> 10,
		res_counter_read_u64(&memcg->kmem, RES_LIMIT) >> 10,
		res_counter_read_u64(&memcg->kmem, RES_FAILCNT));

	for_each_mem_cgroup_tree(iter, memcg) {
		pr_info("Memory cgroup stats");

		rcu_read_lock();
		ret = cgroup_path(iter->css.cgroup, memcg_name, PATH_MAX);
		if (!ret)
			pr_cont(" for %s", memcg_name);
		rcu_read_unlock();
		pr_cont(":");

		for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
			if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
				continue;
			pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i],
				K(mem_cgroup_read_stat(iter, i)));
		}

		for (i = 0; i < NR_LRU_LISTS; i++)
			pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
				K(mem_cgroup_nr_lru_pages(iter, BIT(i))));

		pr_cont("\n");
	}
}

/*
 * This function returns the number of memcg under hierarchy tree. Returns
 * 1(self count) if no children.
 */
static int mem_cgroup_count_children(struct mem_cgroup *memcg)
{
	int num = 0;
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg)
		num++;
	return num;
}

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
static u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
	u64 limit;

	limit = res_counter_read_u64(&memcg->res, RES_LIMIT);

	/*
	 * Do not consider swap space if we cannot swap due to swappiness
	 */
	if (mem_cgroup_swappiness(memcg)) {
		u64 memsw;

		limit += total_swap_pages << PAGE_SHIFT;
		memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);

		/*
		 * If memsw is finite and limits the amount of swap space
		 * available to this memcg, return that limit.
		 */
		limit = min(limit, memsw);
	}

	return limit;
}

static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order)
{
	struct mem_cgroup *iter;
	unsigned long chosen_points = 0;
	unsigned long totalpages;
	unsigned int points = 0;
	struct task_struct *chosen = NULL;

	/*
	 * If current has a pending SIGKILL or is exiting, then automatically
	 * select it.  The goal is to allow it to allocate so that it may
	 * quickly exit and free its memory.
	 */
	if (fatal_signal_pending(current) || current->flags & PF_EXITING) {
		set_thread_flag(TIF_MEMDIE);
		return;
	}

	check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
	totalpages = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1;
	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, &it);
		while ((task = css_task_iter_next(&it))) {
			switch (oom_scan_process_thread(task, totalpages, NULL,
							false)) {
			case OOM_SCAN_SELECT:
				if (chosen)
					put_task_struct(chosen);
				chosen = task;
				chosen_points = ULONG_MAX;
				get_task_struct(chosen);
				/* fall through */
			case OOM_SCAN_CONTINUE:
				continue;
			case OOM_SCAN_ABORT:
				css_task_iter_end(&it);
				mem_cgroup_iter_break(memcg, iter);
				if (chosen)
					put_task_struct(chosen);
				return;
			case OOM_SCAN_OK:
				break;
			};
			points = oom_badness(task, memcg, NULL, totalpages);
			if (points > chosen_points) {
				if (chosen)
					put_task_struct(chosen);
				chosen = task;
				chosen_points = points;
				get_task_struct(chosen);
			}
		}
		css_task_iter_end(&it);
	}

	if (!chosen)
		return;
	points = chosen_points * 1000 / totalpages;
	oom_kill_process(chosen, gfp_mask, order, points, totalpages, memcg,
			 NULL, "Memory cgroup out of memory");
}

static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg,
					gfp_t gfp_mask,
					unsigned long flags)
{
	unsigned long total = 0;
	bool noswap = false;
	int loop;

	if (flags & MEM_CGROUP_RECLAIM_NOSWAP)
		noswap = true;
	if (!(flags & MEM_CGROUP_RECLAIM_SHRINK) && memcg->memsw_is_minimum)
		noswap = true;

	for (loop = 0; loop < MEM_CGROUP_MAX_RECLAIM_LOOPS; loop++) {
		if (loop)
			drain_all_stock_async(memcg);
		total += try_to_free_mem_cgroup_pages(memcg, gfp_mask, noswap);
		/*
		 * Allow limit shrinkers, which are triggered directly
		 * by userspace, to catch signals and stop reclaim
		 * after minimal progress, regardless of the margin.
		 */
		if (total && (flags & MEM_CGROUP_RECLAIM_SHRINK))
			break;
		if (mem_cgroup_margin(memcg))
			break;
		/*
		 * If nothing was reclaimed after two attempts, there
		 * may be no reclaimable pages in this hierarchy.
		 */
		if (loop && !total)
			break;
	}
	return total;
}

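/*
 * Illustrative call of mem_cgroup_reclaim() above, as the limit
 * resizing paths later in this file do:
 *
 *	mem_cgroup_reclaim(memcg, GFP_KERNEL, MEM_CGROUP_RECLAIM_SHRINK);
 *
 * The SHRINK flag lets a userspace-triggered shrink bail out early on
 * signals; adding MEM_CGROUP_RECLAIM_NOSWAP would restrict the pass to
 * page cache.
 */
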
#if MAX_NUMNODES > 1
/**
 * test_mem_cgroup_node_reclaimable
 * @memcg: the target memcg
 * @nid: the node ID to be checked.
 * @noswap : specify true here if the user wants file only information.
 *
 * This function returns whether the specified memcg contains any
 * reclaimable pages on a node. Returns true if there are any reclaimable
 * pages in the node.
 */
static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
		int nid, bool noswap)
{
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
		return true;
	if (noswap || !total_swap_pages)
		return false;
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
		return true;
	return false;
}

/*
 * Always updating the nodemask is not very good - even if we have an empty
 * list or the wrong list here, we can start from some node and traverse all
 * nodes based on the zonelist. So update the list loosely once per 10 secs.
 *
 */
static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
{
	int nid;
	/*
	 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
	 * pagein/pageout changes since the last update.
	 */
	if (!atomic_read(&memcg->numainfo_events))
		return;
	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
		return;

	/* make a nodemask where this memcg uses memory from */
	memcg->scan_nodes = node_states[N_MEMORY];

	for_each_node_mask(nid, node_states[N_MEMORY]) {
		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
			node_clear(nid, memcg->scan_nodes);
	}

	atomic_set(&memcg->numainfo_events, 0);
	atomic_set(&memcg->numainfo_updating, 0);
}

/*
 * Select a node to start reclaim from. Because we only need to reduce the
 * usage counter, starting from anywhere is OK. Reclaiming from the current
 * node has both pros and cons.
 *
 * Freeing memory from the current node means freeing memory from a node
 * which we will use or have used, so it may degrade the LRU. And if several
 * threads hit their limits, they will contend on one node. But freeing from
 * a remote node costs more for memory reclaim because of memory latency.
 *
 * For now, we use round-robin. A better algorithm is welcome.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	int node;

	mem_cgroup_may_update_nodemask(memcg);
	node = memcg->last_scanned_node;

	node = next_node(node, memcg->scan_nodes);
	if (node == MAX_NUMNODES)
		node = first_node(memcg->scan_nodes);
	/*
	 * We call this when we hit a limit, not when pages are added to the
	 * LRU. No LRU may hold pages because all pages are UNEVICTABLE or
	 * the memcg is too small and its pages are not on any LRU. In that
	 * case, we use the current node.
	 */
	if (unlikely(node == MAX_NUMNODES))
		node = numa_node_id();

	memcg->last_scanned_node = node;
	return node;
}

#else
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	return 0;
}

#endif

/*
 * A group is eligible for the soft limit reclaim if
 *	a) it is over its soft limit
 *	b) any parent up the hierarchy is over its soft limit
 */
bool mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg)
{
	struct mem_cgroup *parent = memcg;

	if (res_counter_soft_limit_excess(&memcg->res))
		return true;

	/*
	 * If any parent up the hierarchy is over its soft limit then we
	 * have to obey and reclaim from this group as well.
	 */
	while ((parent = parent_mem_cgroup(parent))) {
		if (res_counter_soft_limit_excess(&parent->res))
			return true;
	}

	return false;
}

/*
 * Check whether the OOM killer is already running under our hierarchy.
 * If someone else is running it, return false.
 * Has to be called with memcg_oom_lock held.
 */
static bool mem_cgroup_oom_lock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter, *failed = NULL;

	for_each_mem_cgroup_tree(iter, memcg) {
		if (iter->oom_lock) {
			/*
			 * this subtree of our hierarchy is already locked,
			 * so we cannot grant the lock.
			 */
			failed = iter;
			mem_cgroup_iter_break(memcg, iter);
			break;
		} else
			iter->oom_lock = true;
	}

	if (!failed)
		return true;

	/*
	 * OK, we failed to lock the whole subtree, so we have to clean up
	 * what we already set up before reaching the failing subtree.
	 */
	for_each_mem_cgroup_tree(iter, memcg) {
		if (iter == failed) {
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
		iter->oom_lock = false;
	}
	return false;
}

/*
 * Has to be called with memcg_oom_lock
 */
static int mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg)
		iter->oom_lock = false;
	return 0;
}

static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg)
		atomic_inc(&iter->under_oom);
}

static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	/*
	 * When a new child is created while the hierarchy is under oom,
	 * mem_cgroup_oom_lock() may not be called. We have to use
	 * atomic_add_unless() here.
	 */
	for_each_mem_cgroup_tree(iter, memcg)
		atomic_add_unless(&iter->under_oom, -1, 0);
}

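/*
 * OOM waiters sleep on the global memcg_oom_waitq.  memcg_oom_wake_function()
 * filters wakeups so that a waiter is only woken when the memcg it waits on
 * and the memcg being recovered belong to the same hierarchy branch.
 */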
static DEFINE_SPINLOCK(memcg_oom_lock);
static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);

struct oom_wait_info {
	struct mem_cgroup *memcg;
	wait_queue_t	wait;
};

static int memcg_oom_wake_function(wait_queue_t *wait,
	unsigned mode, int sync, void *arg)
{
	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
	struct mem_cgroup *oom_wait_memcg;
	struct oom_wait_info *oom_wait_info;

	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
	oom_wait_memcg = oom_wait_info->memcg;

	/*
	 * Both of oom_wait_info->memcg and wake_memcg are stable under us.
	 * Then we can use css_is_ancestor without taking care of RCU.
	 */
	if (!mem_cgroup_same_or_subtree(oom_wait_memcg, wake_memcg)
		&& !mem_cgroup_same_or_subtree(wake_memcg, oom_wait_memcg))
		return 0;
	return autoremove_wake_function(wait, mode, sync, arg);
}

static void memcg_wakeup_oom(struct mem_cgroup *memcg)
{
	/* for filtering, pass "memcg" as argument. */
	__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
}

static void memcg_oom_recover(struct mem_cgroup *memcg)
{
	if (memcg && atomic_read(&memcg->under_oom))
		memcg_wakeup_oom(memcg);
}

/*
 * Try to invoke the OOM killer. Returns false if we should exit the
 * memory-reclaim loop.
 */
static bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask,
				  int order)
{
	struct oom_wait_info owait;
	bool locked, need_to_kill;

	owait.memcg = memcg;
	owait.wait.flags = 0;
	owait.wait.func = memcg_oom_wake_function;
	owait.wait.private = current;
	INIT_LIST_HEAD(&owait.wait.task_list);
	need_to_kill = true;
	mem_cgroup_mark_under_oom(memcg);

	/* At first, try to OOM-lock the hierarchy under memcg. */
	spin_lock(&memcg_oom_lock);
	locked = mem_cgroup_oom_lock(memcg);
	/*
	 * Even if signal_pending(), we can't quit the charge() loop without
	 * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL
	 * under OOM is always welcome, so use TASK_KILLABLE here.
	 */
	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
	if (!locked || memcg->oom_kill_disable)
		need_to_kill = false;
	if (locked)
		mem_cgroup_oom_notify(memcg);
	spin_unlock(&memcg_oom_lock);

	if (need_to_kill) {
		finish_wait(&memcg_oom_waitq, &owait.wait);
		mem_cgroup_out_of_memory(memcg, mask, order);
	} else {
		schedule();
		finish_wait(&memcg_oom_waitq, &owait.wait);
	}
	spin_lock(&memcg_oom_lock);
	if (locked)
		mem_cgroup_oom_unlock(memcg);
	memcg_wakeup_oom(memcg);
	spin_unlock(&memcg_oom_lock);

	mem_cgroup_unmark_under_oom(memcg);

	if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
		return false;
	/* Give the dying process a chance to run */
	schedule_timeout_uninterruptible(1);
	return true;
}

/*
 * Currently used to update mapped file statistics, but the routine can be
 * generalized to update other statistics as well.
 *
 * Notes: Race condition
 *
 * We usually use page_cgroup_lock() for accessing page_cgroup members, but
 * it tends to be costly. Under some conditions, we don't need to do so
 * _always_.
 *
 * Considering "charge", lock_page_cgroup() is not required because all
 * file-stat operations happen after a page is attached to the radix-tree.
 * There is no race with "charge".
 *
 * Considering "uncharge", we know that memcg doesn't clear pc->mem_cgroup
 * at "uncharge" intentionally. So, we always see a valid pc->mem_cgroup even
 * if there is a race with "uncharge". The statistics themselves are properly
 * handled by flags.
 *
 * Considering "move", this is the only case where we see a race. To make the
 * race window small, we check mm->moving_account and detect whether there is
 * a possibility of a race. If there is, we take a lock.
 */
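/*
 * A rough sketch of the calling pattern (not the literal call sites):
 *
 *	bool locked = false;
 *	unsigned long flags;
 *
 *	rcu_read_lock();
 *	__mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 *	mem_cgroup_update_page_stat(page, MEMCG_NR_FILE_MAPPED, val);
 *	if (locked)
 *		__mem_cgroup_end_update_page_stat(page, &flags);
 *	rcu_read_unlock();
 *
 * "locked" reports whether move_lock_mem_cgroup() was actually taken; the
 * end helper must only be called when it was.
 */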

void __mem_cgroup_begin_update_page_stat(struct page *page,
				bool *locked, unsigned long *flags)
{
	struct mem_cgroup *memcg;
	struct page_cgroup *pc;

	pc = lookup_page_cgroup(page);
again:
	memcg = pc->mem_cgroup;
	if (unlikely(!memcg || !PageCgroupUsed(pc)))
		return;
	/*
	 * If this memory cgroup is not under account moving, we don't
	 * need to take move_lock_mem_cgroup(). Because we already hold
	 * rcu_read_lock(), any calls to move_account will be delayed until
	 * rcu_read_unlock() if mem_cgroup_stolen() == true.
	 */
	if (!mem_cgroup_stolen(memcg))
		return;

	move_lock_mem_cgroup(memcg, flags);
	if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) {
		move_unlock_mem_cgroup(memcg, flags);
		goto again;
	}
	*locked = true;
}

void __mem_cgroup_end_update_page_stat(struct page *page, unsigned long *flags)
{
	struct page_cgroup *pc = lookup_page_cgroup(page);

	/*
	 * It's guaranteed that pc->mem_cgroup never changes while the
	 * lock is held, because any routine that modifies pc->mem_cgroup
	 * must take move_lock_mem_cgroup().
	 */
	move_unlock_mem_cgroup(pc->mem_cgroup, flags);
}

void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_page_stat_item idx, int val)
{
	struct mem_cgroup *memcg;
	struct page_cgroup *pc = lookup_page_cgroup(page);
	unsigned long uninitialized_var(flags);

	if (mem_cgroup_disabled())
		return;

	memcg = pc->mem_cgroup;
	if (unlikely(!memcg || !PageCgroupUsed(pc)))
		return;

	switch (idx) {
	case MEMCG_NR_FILE_MAPPED:
		idx = MEM_CGROUP_STAT_FILE_MAPPED;
		break;
	default:
		BUG();
	}

	this_cpu_add(memcg->stat->count[idx], val);
}

/*
 * Size of the first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: it may be necessary to use bigger numbers on big iron.
 */
#define CHARGE_BATCH	32U
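
/*
 * Each cpu keeps a small stock of pre-charged pages.  Charging CHARGE_BATCH
 * pages at a time and then handing them out from the local stock via
 * consume_stock() avoids touching the shared res_counter for every page.
 */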
struct memcg_stock_pcp {
	struct mem_cgroup *cached; /* this is never the root cgroup */
	unsigned int nr_pages;
	struct work_struct work;
	unsigned long flags;
#define FLUSHING_CACHED_CHARGE	0
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
static DEFINE_MUTEX(percpu_charge_mutex);

/**
 * consume_stock: Try to consume stocked charge on this cpu.
 * @memcg: memcg to consume from.
 * @nr_pages: how many pages to charge.
 *
 * The charges will only happen if @memcg matches the current cpu's memcg
 * stock, and at least @nr_pages are available in that stock.  Failure to
 * service an allocation will refill the stock.
 *
 * returns true if successful, false otherwise.
 */
static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	bool ret = true;

	if (nr_pages > CHARGE_BATCH)
		return false;

	stock = &get_cpu_var(memcg_stock);
	if (memcg == stock->cached && stock->nr_pages >= nr_pages)
		stock->nr_pages -= nr_pages;
	else /* need to call res_counter_charge */
		ret = false;
	put_cpu_var(memcg_stock);
	return ret;
}

/*
 * Returns stocks cached in percpu to res_counter and resets cached information.
 */
static void drain_stock(struct memcg_stock_pcp *stock)
{
	struct mem_cgroup *old = stock->cached;

	if (stock->nr_pages) {
		unsigned long bytes = stock->nr_pages * PAGE_SIZE;

		res_counter_uncharge(&old->res, bytes);
		if (do_swap_account)
			res_counter_uncharge(&old->memsw, bytes);
		stock->nr_pages = 0;
	}
	stock->cached = NULL;
}

/*
 * This must be called with preemption disabled, or by a thread that is
 * pinned to the local cpu.
 */
static void drain_local_stock(struct work_struct *dummy)
{
	struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
	drain_stock(stock);
	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
}

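/* Set up the per-cpu stock draining work items once at boot. */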
static void __init memcg_stock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct memcg_stock_pcp *stock =
					&per_cpu(memcg_stock, cpu);
		INIT_WORK(&stock->work, drain_local_stock);
	}
}

/*
 * Cache charges from the res_counter into the local per-cpu area.
 * They will be consumed by consume_stock() later.
 */
static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);

	if (stock->cached != memcg) { /* reset if necessary */
		drain_stock(stock);
		stock->cached = memcg;
	}
	stock->nr_pages += nr_pages;
	put_cpu_var(memcg_stock);
}

/*
 * Drains all per-CPU charge caches for the given root_memcg and the subtree
 * of the hierarchy under it. The sync flag says whether we should block
 * until the work is done.
 */
static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
{
	int cpu, curcpu;

	/* Notify other cpus that system-wide "drain" is running */
	get_online_cpus();
	curcpu = get_cpu();
	for_each_online_cpu(cpu) {
		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
		struct mem_cgroup *memcg;

		memcg = stock->cached;
		if (!memcg || !stock->nr_pages)
			continue;
		if (!mem_cgroup_same_or_subtree(root_memcg, memcg))
			continue;
		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
			if (cpu == curcpu)
				drain_local_stock(&stock->work);
			else
				schedule_work_on(cpu, &stock->work);
		}
	}
	put_cpu();

	if (!sync)
		goto out;

	for_each_online_cpu(cpu) {
		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
		if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
			flush_work(&stock->work);
	}
out:
	put_online_cpus();
}

/*
 * Tries to drain stocked charges on other cpus. This function is asynchronous
 * and just schedules a work item per cpu to drain locally on each cpu. The
 * caller can expect some charges to be returned to the res_counter later, but
 * it cannot wait for that to happen.
 */
static void drain_all_stock_async(struct mem_cgroup *root_memcg)
{
	/*
	 * If someone calls draining, avoid adding more kworker runs.
	 */
	if (!mutex_trylock(&percpu_charge_mutex))
		return;
	drain_all_stock(root_memcg, false);
	mutex_unlock(&percpu_charge_mutex);
}

/* This is a synchronous drain interface. */
static void drain_all_stock_sync(struct mem_cgroup *root_memcg)
{
	/* called when force_empty is called */
	mutex_lock(&percpu_charge_mutex);
	drain_all_stock(root_memcg, true);
	mutex_unlock(&percpu_charge_mutex);
}

/*
 * This function drains the percpu counter values from a DEAD cpu and
 * moves them to the local cpu. Note that this function can be preempted.
 */
static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
{
	int i;

	spin_lock(&memcg->pcp_counter_lock);
	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
		long x = per_cpu(memcg->stat->count[i], cpu);

		per_cpu(memcg->stat->count[i], cpu) = 0;
		memcg->nocpu_base.count[i] += x;
	}
	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
		unsigned long x = per_cpu(memcg->stat->events[i], cpu);

		per_cpu(memcg->stat->events[i], cpu) = 0;
		memcg->nocpu_base.events[i] += x;
	}
	spin_unlock(&memcg->pcp_counter_lock);
}

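/*
 * CPU hotplug callback: when a cpu goes away, fold its percpu statistics
 * into nocpu_base and drain whatever charge stock it was still holding.
 */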
static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
					unsigned long action,
					void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	struct memcg_stock_pcp *stock;
	struct mem_cgroup *iter;

	if (action == CPU_ONLINE)
		return NOTIFY_OK;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	for_each_mem_cgroup(iter)
		mem_cgroup_drain_pcp_counter(iter, cpu);

	stock = &per_cpu(memcg_stock, cpu);
	drain_stock(stock);
	return NOTIFY_OK;
}

/* See __mem_cgroup_try_charge() for details */
enum {
	CHARGE_OK,		/* success */
	CHARGE_RETRY,		/* need to retry but retry is not bad */
	CHARGE_NOMEM,		/* we can't do more. return -ENOMEM */
	CHARGE_WOULDBLOCK,	/* GFP_WAIT wasn't set and not enough res. */
	CHARGE_OOM_DIE,		/* the current is killed because of OOM */
};

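/*
 * Try to charge @nr_pages to @memcg's res counter (and the memsw counter
 * when swap accounting is enabled), reclaiming and possibly invoking the
 * memcg OOM handler on failure.  Returns one of the CHARGE_* codes above.
 */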
static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
				unsigned int nr_pages, unsigned int min_pages,
				bool oom_check)
{
	unsigned long csize = nr_pages * PAGE_SIZE;
	struct mem_cgroup *mem_over_limit;
	struct res_counter *fail_res;
	unsigned long flags = 0;
	int ret;

	ret = res_counter_charge(&memcg->res, csize, &fail_res);

	if (likely(!ret)) {
		if (!do_swap_account)
			return CHARGE_OK;
		ret = res_counter_charge(&memcg->memsw, csize, &fail_res);
		if (likely(!ret))
			return CHARGE_OK;

		res_counter_uncharge(&memcg->res, csize);
		mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
		flags |= MEM_CGROUP_RECLAIM_NOSWAP;
	} else
		mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
	/*
	 * Never reclaim on behalf of optional batching, retry with a
	 * single page instead.
	 */
	if (nr_pages > min_pages)
		return CHARGE_RETRY;

	if (!(gfp_mask & __GFP_WAIT))
		return CHARGE_WOULDBLOCK;

	if (gfp_mask & __GFP_NORETRY)
		return CHARGE_NOMEM;

	ret = mem_cgroup_reclaim(mem_over_limit, gfp_mask, flags);
	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
		return CHARGE_RETRY;
	/*
	 * Even though the limit is exceeded at this point, reclaim
	 * may have been able to free some pages.  Retry the charge
	 * before killing the task.
	 *
	 * Only for regular pages, though: huge pages are rather
	 * unlikely to succeed so close to the limit, and we fall back
	 * to regular pages anyway in case of failure.
	 */
	if (nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER) && ret)
		return CHARGE_RETRY;

	/*
	 * At task move, charge accounts can be doubly counted. So, it's
	 * better to wait until the end of task_move if something is going on.
	 */
	if (mem_cgroup_wait_acct_move(mem_over_limit))
		return CHARGE_RETRY;

	/* If we don't need to call the oom-killer at all, return immediately */
	if (!oom_check)
		return CHARGE_NOMEM;
	/* check OOM */
	if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask, get_order(csize)))
		return CHARGE_OOM_DIE;

	return CHARGE_RETRY;
}

/*
 * __mem_cgroup_try_charge() does
 * 1. detect memcg to be charged against from passed *mm and *ptr,
 * 2. update res_counter
 * 3. call memory reclaim if necessary.
 *
 * In some special cases, if the task is dying (fatal_signal_pending()) or
 * has TIF_MEMDIE set, this function returns -EINTR while writing
 * root_mem_cgroup to *ptr. There are two reasons for this. 1: fatal threads
 * should quit as soon as possible without any hazards. 2: all pages should
 * have a valid pc->mem_cgroup. If mm is NULL and the caller doesn't pass a
 * valid memcg pointer, that is treated as a charge to root_mem_cgroup.
 *
 * So __mem_cgroup_try_charge() will return
 *  0       ...  on success, filling *ptr with a valid memcg pointer.
 *  -ENOMEM ...  charge failure because of resource limits.
 *  -EINTR  ...  if the thread is dying. *ptr is filled with root_mem_cgroup.
 *
 * Unlike the exported interface, an "oom" parameter is added. If oom == true,
 * the oom-killer can be invoked.
 */
static int __mem_cgroup_try_charge(struct mm_struct *mm,
				   gfp_t gfp_mask,
				   unsigned int nr_pages,
				   struct mem_cgroup **ptr,
				   bool oom)
{
	unsigned int batch = max(CHARGE_BATCH, nr_pages);
	int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct mem_cgroup *memcg = NULL;
	int ret;

	/*
	 * Unlike the global VM's OOM kill, we're not in a system-level
	 * memory shortage here. So, allow dying processes to proceed, in
	 * addition to MEMDIE processes.
	 */
	if (unlikely(test_thread_flag(TIF_MEMDIE)
		     || fatal_signal_pending(current)))
		goto bypass;

	/*
	 * We always charge the cgroup the mm_struct belongs to.
	 * The mm_struct's mem_cgroup changes on task migration if the
	 * thread group leader migrates. It's possible that mm is not
	 * set, if so charge the root memcg (happens for pagecache usage).
	 */
	if (!*ptr && !mm)
		*ptr = root_mem_cgroup;
again:
	if (*ptr) { /* css should be a valid one */
		memcg = *ptr;
		if (mem_cgroup_is_root(memcg))
			goto done;
		if (consume_stock(memcg, nr_pages))
			goto done;
		css_get(&memcg->css);
	} else {
		struct task_struct *p;

		rcu_read_lock();
		p = rcu_dereference(mm->owner);
		/*
		 * Because we don't have task_lock(), "p" can exit.
		 * In that case, "memcg" can point to root or "p" can be NULL
		 * because of a race with swapoff. Then, we have a small risk
		 * of mis-accounting. But such mis-accounting by races always
		 * happens because we don't take cgroup_mutex(). It's overkill
		 * to prevent it, so we allow that small race here.
		 * (*) swapoff et al. will charge against the mm_struct, not
		 * against the task_struct, so mm->owner can be NULL.
		 */
		memcg = mem_cgroup_from_task(p);
		if (!memcg)
			memcg = root_mem_cgroup;
		if (mem_cgroup_is_root(memcg)) {
			rcu_read_unlock();
			goto done;
		}
		if (consume_stock(memcg, nr_pages)) {
			/*
			 * It seems dangerous to access memcg without css_get().
			 * But considering how consume_stock works, it's not
			 * necessary. If consume_stock succeeds, some charges
			 * from this memcg are cached on this cpu. So, we
			 * don't need to call css_get()/css_tryget() before
			 * calling consume_stock().
			 */
			rcu_read_unlock();
			goto done;
		}
		/* after here, we may be blocked. we need to get refcnt */
		if (!css_tryget(&memcg->css)) {
			rcu_read_unlock();
			goto again;
		}
		rcu_read_unlock();
	}

	do {
		bool oom_check;

		/* If killed, bypass charge */
		if (fatal_signal_pending(current)) {
			css_put(&memcg->css);
			goto bypass;
		}

		oom_check = false;
		if (oom && !nr_oom_retries) {
			oom_check = true;
			nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
		}

		ret = mem_cgroup_do_charge(memcg, gfp_mask, batch, nr_pages,
		    oom_check);
		switch (ret) {
		case CHARGE_OK:
			break;
		case CHARGE_RETRY: /* not in OOM situation but retry */
			batch = nr_pages;
			css_put(&memcg->css);
			memcg = NULL;
			goto again;
		case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
			css_put(&memcg->css);
			goto nomem;
		case CHARGE_NOMEM: /* OOM routine works */
			if (!oom) {
				css_put(&memcg->css);
				goto nomem;
			}
			/* If oom, we never return -ENOMEM */
			nr_oom_retries--;
			break;
		case CHARGE_OOM_DIE: /* Killed by OOM Killer */
			css_put(&memcg->css);
			goto bypass;
		}
	} while (ret != CHARGE_OK);

	if (batch > nr_pages)
		refill_stock(memcg, batch - nr_pages);
	css_put(&memcg->css);
done:
	*ptr = memcg;
	return 0;
nomem:
	*ptr = NULL;
	return -ENOMEM;
bypass:
	*ptr = root_mem_cgroup;
	return -EINTR;
}

/*
 * Sometimes we have to undo a charge we got by try_charge().
 * This function is for that: it does the uncharge and puts the css
 * refcount gotten by try_charge().
 */
static void __mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
				       unsigned int nr_pages)
{
	if (!mem_cgroup_is_root(memcg)) {
		unsigned long bytes = nr_pages * PAGE_SIZE;

		res_counter_uncharge(&memcg->res, bytes);
		if (do_swap_account)
			res_counter_uncharge(&memcg->memsw, bytes);
	}
}

/*
 * Cancel charges in this cgroup; this doesn't propagate to the parent cgroup.
 * This is useful when moving usage to the parent cgroup.
 */
static void __mem_cgroup_cancel_local_charge(struct mem_cgroup *memcg,
					unsigned int nr_pages)
{
	unsigned long bytes = nr_pages * PAGE_SIZE;

	if (mem_cgroup_is_root(memcg))
		return;

	res_counter_uncharge_until(&memcg->res, memcg->res.parent, bytes);
	if (do_swap_account)
		res_counter_uncharge_until(&memcg->memsw,
						memcg->memsw.parent, bytes);
}

/*
 * A helper function to get mem_cgroup from ID. Must be called under
 * rcu_read_lock().  The caller is responsible for calling css_tryget if
 * the mem_cgroup is used for charging. (dropping refcnt from swap can be
 * called against removed memcg.)
 */
static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
{
	struct cgroup_subsys_state *css;

	/* ID 0 is unused ID */
	if (!id)
		return NULL;
	css = css_lookup(&mem_cgroup_subsys, id);
	if (!css)
		return NULL;
	return mem_cgroup_from_css(css);
}

struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	struct mem_cgroup *memcg = NULL;
	struct page_cgroup *pc;
	unsigned short id;
	swp_entry_t ent;

	VM_BUG_ON(!PageLocked(page));

	pc = lookup_page_cgroup(page);
	lock_page_cgroup(pc);
	if (PageCgroupUsed(pc)) {
		memcg = pc->mem_cgroup;
		if (memcg && !css_tryget(&memcg->css))
			memcg = NULL;
	} else if (PageSwapCache(page)) {
		ent.val = page_private(page);
		id = lookup_swap_cgroup_id(ent);
		rcu_read_lock();
		memcg = mem_cgroup_lookup(id);
		if (memcg && !css_tryget(&memcg->css))
			memcg = NULL;
		rcu_read_unlock();
	}
	unlock_page_cgroup(pc);
	return memcg;
}

static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
				       struct page *page,
				       unsigned int nr_pages,
				       enum charge_type ctype,
				       bool lrucare)
{
	struct page_cgroup *pc = lookup_page_cgroup(page);
	struct zone *uninitialized_var(zone);
	struct lruvec *lruvec;
	bool was_on_lru = false;
	bool anon;

	lock_page_cgroup(pc);
	VM_BUG_ON(PageCgroupUsed(pc));
	/*
	 * we don't need page_cgroup_lock about tail pages, because they are
	 * not accessed by any other context at this point.
	 */

	/*
	 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
	 * may already be on some other mem_cgroup's LRU.  Take care of it.
	 */
	if (lrucare) {
		zone = page_zone(page);
		spin_lock_irq(&zone->lru_lock);
		if (PageLRU(page)) {
			lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
			ClearPageLRU(page);
			del_page_from_lru_list(page, lruvec, page_lru(page));
			was_on_lru = true;
		}
	}

	pc->mem_cgroup = memcg;
	/*
	 * We access a page_cgroup asynchronously without lock_page_cgroup().
	 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
	 * is accessed after testing USED bit. To make pc->mem_cgroup visible
	 * before USED bit, we need memory barrier here.
	 * See mem_cgroup_add_lru_list(), etc.
	 */
	smp_wmb();
	SetPageCgroupUsed(pc);

	if (lrucare) {
		if (was_on_lru) {
			lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
			VM_BUG_ON(PageLRU(page));
			SetPageLRU(page);
			add_page_to_lru_list(page, lruvec, page_lru(page));
		}
		spin_unlock_irq(&zone->lru_lock);
	}

	if (ctype == MEM_CGROUP_CHARGE_TYPE_ANON)
		anon = true;
	else
		anon = false;

	mem_cgroup_charge_statistics(memcg, page, anon, nr_pages);
	unlock_page_cgroup(pc);

	/*
	 * "charge_statistics" updated the event counter.
	 */
	memcg_check_events(memcg, page);
}

static DEFINE_MUTEX(set_limit_mutex);

#ifdef CONFIG_MEMCG_KMEM
static inline bool memcg_can_account_kmem(struct mem_cgroup *memcg)
{
	return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg) &&
		(memcg->kmem_account_flags & KMEM_ACCOUNTED_MASK);
}

/*
 * This is a bit cumbersome, but it is rarely used and avoids a backpointer
 * in the memcg_cache_params struct.
 */
static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
{
	struct kmem_cache *cachep;

	VM_BUG_ON(p->is_root_cache);
	cachep = p->root_cache;
	return cachep->memcg_params->memcg_caches[memcg_cache_id(p->memcg)];
}

#ifdef CONFIG_SLABINFO
static int mem_cgroup_slabinfo_read(struct cgroup_subsys_state *css,
				    struct cftype *cft, struct seq_file *m)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct memcg_cache_params *params;

	if (!memcg_can_account_kmem(memcg))
		return -EIO;

	print_slabinfo_header(m);

	mutex_lock(&memcg->slab_caches_mutex);
	list_for_each_entry(params, &memcg->memcg_slab_caches, list)
		cache_show(memcg_params_to_cache(params), m);
	mutex_unlock(&memcg->slab_caches_mutex);

	return 0;
}
#endif

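/*
 * Charge @size bytes to the memcg's kmem counter and, through
 * __mem_cgroup_try_charge(), to the regular memory (and memsw) counters as
 * well, so kernel memory always shows up in the overall usage too.
 */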
static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
{
	struct res_counter *fail_res;
	struct mem_cgroup *_memcg;
	int ret = 0;
	bool may_oom;

	ret = res_counter_charge(&memcg->kmem, size, &fail_res);
	if (ret)
		return ret;

	/*
	 * Conditions under which we can wait for the oom_killer. Those are
	 * the same conditions tested by the core page allocator
	 */
	may_oom = (gfp & __GFP_FS) && !(gfp & __GFP_NORETRY);

	_memcg = memcg;
	ret = __mem_cgroup_try_charge(NULL, gfp, size >> PAGE_SHIFT,
				      &_memcg, may_oom);

	if (ret == -EINTR)  {
		/*
		 * __mem_cgroup_try_charge() chose to bypass to root due to
		 * OOM kill or fatal signal.  Since our only options are to
		 * either fail the allocation or charge it to this cgroup, do
		 * it as a temporary condition. But we can't fail. From a
		 * kmem/slab perspective, the cache has already been selected,
		 * by mem_cgroup_kmem_get_cache(), so it is too late to change
		 * our minds.
		 *
		 * This condition will only trigger if the task entered
		 * memcg_charge_kmem in a sane state, but was OOM-killed during
		 * __mem_cgroup_try_charge() above. Tasks that were already
		 * dying when the allocation triggers should have been already
		 * directed to the root cgroup in memcontrol.h
		 */
		res_counter_charge_nofail(&memcg->res, size, &fail_res);
		if (do_swap_account)
			res_counter_charge_nofail(&memcg->memsw, size,
						  &fail_res);
		ret = 0;
	} else if (ret)
		res_counter_uncharge(&memcg->kmem, size);

	return ret;
}

static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size)
{
	res_counter_uncharge(&memcg->res, size);
	if (do_swap_account)
		res_counter_uncharge(&memcg->memsw, size);

	/* Not down to 0 */
	if (res_counter_uncharge(&memcg->kmem, size))
		return;

	/*
	 * Releases a reference taken in kmem_cgroup_css_offline in case
	 * this last uncharge is racing with the offlining code or it is
	 * outliving the memcg existence.
	 *
	 * The memory barrier imposed by test&clear is paired with the
	 * explicit one in memcg_kmem_mark_dead().
	 */
	if (memcg_kmem_test_and_clear_dead(memcg))
		css_put(&memcg->css);
}

void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep)
{
	if (!memcg)
		return;

	mutex_lock(&memcg->slab_caches_mutex);
	list_add(&cachep->memcg_params->list, &memcg->memcg_slab_caches);
	mutex_unlock(&memcg->slab_caches_mutex);
}

/*
 * helper for accessing a memcg's index. It will be used as an index in the
 * child cache array in kmem_cache, and also to derive its name. This function
 * will return -1 when this is not a kmem-limited memcg.
 */
int memcg_cache_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}

/*
 * This ends up being protected by the set_limit mutex, during normal
 * operation, because that is its main call site.
 *
 * But when we create a new cache, we can call this as well if its parent
 * is kmem-limited. That will have to hold set_limit_mutex as well.
 */
int memcg_update_cache_sizes(struct mem_cgroup *memcg)
{
	int num, ret;

	num = ida_simple_get(&kmem_limited_groups,
				0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
	if (num < 0)
		return num;
	/*
	 * After this point, kmem_accounted (that we test atomically in
	 * the beginning of this conditional), is no longer 0. This
	 * guarantees only one process will set the following boolean
	 * to true. We don't need test_and_set because we're protected
	 * by the set_limit_mutex anyway.
	 */
	memcg_kmem_set_activated(memcg);

	ret = memcg_update_all_caches(num+1);
	if (ret) {
		ida_simple_remove(&kmem_limited_groups, num);
		memcg_kmem_clear_activated(memcg);
		return ret;
	}

	memcg->kmemcg_id = num;
	INIT_LIST_HEAD(&memcg->memcg_slab_caches);
	mutex_init(&memcg->slab_caches_mutex);
	return 0;
}

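/*
 * Pick a size for the per-memcg cache array: twice the number of groups,
 * clamped to [MEMCG_CACHES_MIN_SIZE, MEMCG_CACHES_MAX_SIZE], so the array
 * does not need to grow on every new kmem-limited memcg.
 */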
static size_t memcg_caches_array_size(int num_groups)
{
	ssize_t size;
	if (num_groups <= 0)
		return 0;

	size = 2 * num_groups;
	if (size < MEMCG_CACHES_MIN_SIZE)
		size = MEMCG_CACHES_MIN_SIZE;
	else if (size > MEMCG_CACHES_MAX_SIZE)
		size = MEMCG_CACHES_MAX_SIZE;

	return size;
}

/*
 * We should update the current array size iff all cache updates succeed. This
 * can only be done from the slab side. The slab mutex needs to be held when
 * calling this.
 */
void memcg_update_array_size(int num)
{
	if (num > memcg_limited_groups_array_size)
		memcg_limited_groups_array_size = memcg_caches_array_size(num);
}

static void kmem_cache_destroy_work_func(struct work_struct *w);

int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
{
	struct memcg_cache_params *cur_params = s->memcg_params;

	VM_BUG_ON(s->memcg_params && !s->memcg_params->is_root_cache);

	if (num_groups > memcg_limited_groups_array_size) {
		int i;
		ssize_t size = memcg_caches_array_size(num_groups);

		size *= sizeof(void *);
		size += offsetof(struct memcg_cache_params, memcg_caches);

		s->memcg_params = kzalloc(size, GFP_KERNEL);
		if (!s->memcg_params) {
			s->memcg_params = cur_params;
			return -ENOMEM;
		}

		s->memcg_params->is_root_cache = true;

		/*
		 * There is the chance it will be bigger than
		 * memcg_limited_groups_array_size, if we failed an allocation
		 * in a cache, in which case all caches updated before it, will
		 * have a bigger array.
		 *
		 * But if that is the case, the data after
		 * memcg_limited_groups_array_size is certainly unused
		 */
		for (i = 0; i < memcg_limited_groups_array_size; i++) {
			if (!cur_params->memcg_caches[i])
				continue;
			s->memcg_params->memcg_caches[i] =
						cur_params->memcg_caches[i];
		}

		/*
		 * Ideally, we would wait until all caches succeed, and only
		 * then free the old one. But this is not worth the extra
		 * pointer per-cache we'd have to have for this.
		 *
		 * It is not a big deal if some caches are left with a size
		 * bigger than the others. And all updates will reset this
		 * anyway.
		 */
		kfree(cur_params);
	}
	return 0;
}

int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
			 struct kmem_cache *root_cache)
{
	size_t size;

	if (!memcg_kmem_enabled())
		return 0;

	if (!memcg) {
		size = offsetof(struct memcg_cache_params, memcg_caches);
		size += memcg_limited_groups_array_size * sizeof(void *);
	} else
		size = sizeof(struct memcg_cache_params);

	s->memcg_params = kzalloc(size, GFP_KERNEL);
	if (!s->memcg_params)
		return -ENOMEM;

	if (memcg) {
		s->memcg_params->memcg = memcg;
		s->memcg_params->root_cache = root_cache;
		INIT_WORK(&s->memcg_params->destroy,
				kmem_cache_destroy_work_func);
	} else
		s->memcg_params->is_root_cache = true;

	return 0;
}

void memcg_release_cache(struct kmem_cache *s)
{
	struct kmem_cache *root;
	struct mem_cgroup *memcg;
	int id;

	/*
	 * This happens, for instance, when a root cache goes away before we
	 * add any memcg.
	 */
	if (!s->memcg_params)
		return;

	if (s->memcg_params->is_root_cache)
		goto out;

	memcg = s->memcg_params->memcg;
	id  = memcg_cache_id(memcg);

	root = s->memcg_params->root_cache;
	root->memcg_params->memcg_caches[id] = NULL;

	mutex_lock(&memcg->slab_caches_mutex);
	list_del(&s->memcg_params->list);
	mutex_unlock(&memcg->slab_caches_mutex);

	css_put(&memcg->css);
out:
	kfree(s->memcg_params);
}

/*
 * During the creation a new cache, we need to disable our accounting mechanism
 * altogether. This is true even if we are not creating, but rather just
 * enqueuing new caches to be created.
 *
 * This is because that process will trigger allocations; some visible, like
 * explicit kmallocs to auxiliary data structures, name strings and internal
 * cache structures; some well concealed, like INIT_WORK() that can allocate
 * objects during debug.
 *
 * If any allocation happens during memcg_kmem_get_cache, we will recurse back
 * to it. This may not be a bounded recursion: since the first cache creation
 * failed to complete (waiting on the allocation), we'll just try to create the
 * cache again, failing at the same point.
 *
 * memcg_kmem_get_cache is prepared to abort after seeing a positive count of
 * memcg_kmem_skip_account. So we enclose anything that might allocate memory
 * inside the following two functions.
 */
static inline void memcg_stop_kmem_account(void)
{
	VM_BUG_ON(!current->mm);
	current->memcg_kmem_skip_account++;
}

static inline void memcg_resume_kmem_account(void)
{
	VM_BUG_ON(!current->mm);
	current->memcg_kmem_skip_account--;
}

static void kmem_cache_destroy_work_func(struct work_struct *w)
{
	struct kmem_cache *cachep;
	struct memcg_cache_params *p;

	p = container_of(w, struct memcg_cache_params, destroy);

	cachep = memcg_params_to_cache(p);

	/*
	 * If we get down to 0 after shrink, we could delete right away.
	 * However, memcg_release_pages() already puts us back in the workqueue
	 * in that case. If we proceed deleting, we'll get a dangling
	 * reference, and removing the object from the workqueue in that case
	 * is unnecessary complication. We are not a fast path.
	 *
	 * Note that this case is fundamentally different from racing with
	 * shrink_slab(): if mem_cgroup_destroy_cache() is called in
	 * kmem_cache_shrink, not only we would be reinserting a dead cache
	 * into the queue, but doing so from inside the worker racing to
	 * destroy it.
	 *
	 * So if we aren't down to zero, we'll just schedule a worker and try
	 * again
	 */
	if (atomic_read(&cachep->memcg_params->nr_pages) != 0) {
		kmem_cache_shrink(cachep);
		if (atomic_read(&cachep->memcg_params->nr_pages) == 0)
			return;
	} else
		kmem_cache_destroy(cachep);
}

void mem_cgroup_destroy_cache(struct kmem_cache *cachep)
{
	if (!cachep->memcg_params->dead)
		return;

	/*
	 * There are many ways in which we can get here.
	 *
	 * We can get to a memory-pressure situation while the delayed work is
	 * still pending to run. The vmscan shrinkers can then release all
	 * cache memory and get us to destruction. If this is the case, we'll
	 * be executed twice, which is a bug (the second time will execute over
	 * bogus data). In this case, cancelling the work should be fine.
	 *
	 * But we can also get here from the worker itself, if
	 * kmem_cache_shrink is enough to shake all the remaining objects and
	 * get the page count to 0. In this case, we'll deadlock if we try to
	 * cancel the work (the worker runs with an internal lock held, which
	 * is the same lock we would hold for cancel_work_sync().)
	 *
	 * Since we can't possibly know who got us here, just refrain from
	 * running if there is already work pending
	 */
	if (work_pending(&cachep->memcg_params->destroy))
		return;
	/*
	 * We have to defer the actual destroying to a workqueue, because
	 * we might currently be in a context that cannot sleep.
	 */
	schedule_work(&cachep->memcg_params->destroy);
}

/*
 * This lock protects updaters, not readers. We want readers to be as fast as
 * they can, and they will either see NULL or a valid cache value. Our model
 * allows them to see NULL, in which case the root memcg will be selected.
 *
 * We need this lock because multiple allocations to the same cache may be in
 * flight at once and span more than one worker. Only one of them can create
 * the cache.
 */
static DEFINE_MUTEX(memcg_cache_mutex);

/*
 * Called with memcg_cache_mutex held
 */
static struct kmem_cache *kmem_cache_dup(struct mem_cgroup *memcg,
					 struct kmem_cache *s)
{
	struct kmem_cache *new;
	static char *tmp_name = NULL;

	lockdep_assert_held(&memcg_cache_mutex);

	/*
	 * kmem_cache_create_memcg duplicates the given name, and
	 * cgroup_name() for this name requires RCU context.
	 * This static temporary buffer is used to prevent pointless
	 * short-lived allocations.
	 */
	if (!tmp_name) {
		tmp_name = kmalloc(PATH_MAX, GFP_KERNEL);
		if (!tmp_name)
			return NULL;
	}

	rcu_read_lock();
	snprintf(tmp_name, PATH_MAX, "%s(%d:%s)", s->name,
			 memcg_cache_id(memcg), cgroup_name(memcg->css.cgroup));
	rcu_read_unlock();

	new = kmem_cache_create_memcg(memcg, tmp_name, s->object_size, s->align,
				      (s->flags & ~SLAB_PANIC), s->ctor, s);

	if (new)
		new->allocflags |= __GFP_KMEMCG;

	return new;
}

static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
						  struct kmem_cache *cachep)
{
	struct kmem_cache *new_cachep;
	int idx;

	BUG_ON(!memcg_can_account_kmem(memcg));

	idx = memcg_cache_id(memcg);

	mutex_lock(&memcg_cache_mutex);
	new_cachep = cachep->memcg_params->memcg_caches[idx];
	if (new_cachep) {
		css_put(&memcg->css);
		goto out;
	}

	new_cachep = kmem_cache_dup(memcg, cachep);
	if (new_cachep == NULL) {
		new_cachep = cachep;
		css_put(&memcg->css);
		goto out;
	}

	atomic_set(&new_cachep->memcg_params->nr_pages, 0);

	cachep->memcg_params->memcg_caches[idx] = new_cachep;
	/*
	 * the readers won't lock, make sure everybody sees the updated value,
	 * so they won't put stuff in the queue again for no reason
	 */
	wmb();
out:
	mutex_unlock(&memcg_cache_mutex);
	return new_cachep;
}

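/*
 * Called when a root cache is destroyed: take down every per-memcg child
 * cache that was cloned from it.
 */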
void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
{
	struct kmem_cache *c;
	int i;

	if (!s->memcg_params)
		return;
	if (!s->memcg_params->is_root_cache)
		return;

	/*
	 * If the cache is being destroyed, we trust that there is no one else
	 * requesting objects from it. Even if there are, the sanity checks in
	 * kmem_cache_destroy should catch this ill case.
	 *
	 * Still, we don't want anyone else freeing memcg_caches under our
	 * noses, which can happen if a new memcg comes to life. As usual,
	 * we'll take the set_limit_mutex to protect ourselves against this.
	 */
	mutex_lock(&set_limit_mutex);
	for (i = 0; i < memcg_limited_groups_array_size; i++) {
		c = s->memcg_params->memcg_caches[i];
		if (!c)
			continue;

		/*
		 * We will now manually delete the caches, so to avoid races
		 * we need to cancel all pending destruction workers and
		 * proceed with destruction ourselves.
		 *
		 * kmem_cache_destroy() will call kmem_cache_shrink internally,
		 * and that could spawn the workers again: it is likely that
	 * the cache still has active pages until this very moment.
		 * This would lead us back to mem_cgroup_destroy_cache.
		 *
		 * But that will not execute at all if the "dead" flag is not
		 * set, so flip it down to guarantee we are in control.
		 */
		c->memcg_params->dead = false;
		cancel_work_sync(&c->memcg_params->destroy);
		kmem_cache_destroy(c);
	}
	mutex_unlock(&set_limit_mutex);
}

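/*
 * Per-memcg cache creation is deferred to a workqueue: the allocating
 * context may not be allowed to sleep, and creating a kmem_cache needs the
 * slab_mutex.  Each pending request is described by a struct create_work.
 */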
struct create_work {
	struct mem_cgroup *memcg;
	struct kmem_cache *cachep;
	struct work_struct work;
};

static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
{
	struct kmem_cache *cachep;
	struct memcg_cache_params *params;

	if (!memcg_kmem_is_active(memcg))
		return;

	mutex_lock(&memcg->slab_caches_mutex);
	list_for_each_entry(params, &memcg->memcg_slab_caches, list) {
		cachep = memcg_params_to_cache(params);
		cachep->memcg_params->dead = true;
		schedule_work(&cachep->memcg_params->destroy);
	}
	mutex_unlock(&memcg->slab_caches_mutex);
}

static void memcg_create_cache_work_func(struct work_struct *w)
{
	struct create_work *cw;

	cw = container_of(w, struct create_work, work);
	memcg_create_kmem_cache(cw->memcg, cw->cachep);
	kfree(cw);
}

/*
 * Enqueue the creation of a per-memcg kmem_cache.
 */
static void __memcg_create_cache_enqueue(struct mem_cgroup *memcg,
					 struct kmem_cache *cachep)
{
	struct create_work *cw;

	cw = kmalloc(sizeof(struct create_work), GFP_NOWAIT);
	if (cw == NULL) {
		css_put(&memcg->css);
		return;
	}

	cw->memcg = memcg;
	cw->cachep = cachep;

	INIT_WORK(&cw->work, memcg_create_cache_work_func);
	schedule_work(&cw->work);
}

static void memcg_create_cache_enqueue(struct mem_cgroup *memcg,
				       struct kmem_cache *cachep)
{
	/*
	 * We need to stop accounting when we kmalloc, because if the
	 * corresponding kmalloc cache is not yet created, the first allocation
	 * in __memcg_create_cache_enqueue will recurse.
	 *
	 * However, it is better to enclose the whole function. Depending on
	 * the debugging options enabled, INIT_WORK(), for instance, can
	 * trigger an allocation. This too, will make us recurse. Because at
	 * this point we can't allow ourselves back into memcg_kmem_get_cache,
	 * the safest choice is to do it like this, wrapping the whole function.
	 */
	memcg_stop_kmem_account();
	__memcg_create_cache_enqueue(memcg, cachep);
	memcg_resume_kmem_account();
}
/*
 * Return the kmem_cache we're supposed to use for a slab allocation.
 * We try to use the current memcg's version of the cache.
 *
 * If the cache does not exist yet, if we are the first user of it,
 * we either create it immediately, if possible, or create it asynchronously
 * in a workqueue.
 * In the latter case, we will let the current allocation go through with
 * the original cache.
 *
 * Can't be called in interrupt context or from kernel threads.
 * This function needs to be called with rcu_read_lock() held.
 */
struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
					  gfp_t gfp)
{
	struct mem_cgroup *memcg;
	int idx;

	VM_BUG_ON(!cachep->memcg_params);
	VM_BUG_ON(!cachep->memcg_params->is_root_cache);

	if (!current->mm || current->memcg_kmem_skip_account)
		return cachep;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(current->mm->owner));

	if (!memcg_can_account_kmem(memcg))
		goto out;

	idx = memcg_cache_id(memcg);

	/*
	 * barrier to make sure we're always seeing the up to date value.  The
	 * code updating memcg_caches will issue a write barrier to match this.
	 */
	read_barrier_depends();
	if (likely(cachep->memcg_params->memcg_caches[idx])) {
		cachep = cachep->memcg_params->memcg_caches[idx];
		goto out;
	}

	/* The corresponding put will be done in the workqueue. */
	if (!css_tryget(&memcg->css))
		goto out;
	rcu_read_unlock();

	/*
	 * If we are in a safe context (can wait, and not in interrupt
	 * context), we could be predictable and return right away.
	 * This would guarantee that the allocation being performed
	 * already belongs in the new cache.
	 *
	 * However, there are some clashes that can arrive from locking.
	 * For instance, because we acquire the slab_mutex while doing
	 * kmem_cache_dup, this means no further allocation could happen
	 * with the slab_mutex held.
	 *
	 * Also, because cache creation issue get_online_cpus(), this
	 * creates a lock chain: memcg_slab_mutex -> cpu_hotplug_mutex,
	 * that ends up reversed during cpu hotplug. (cpuset allocates
	 * a bunch of GFP_KERNEL memory during cpuup). Due to all that,
	 * better to defer everything.
	 */
	memcg_create_cache_enqueue(memcg, cachep);
	return cachep;
out:
	rcu_read_unlock();
	return cachep;
}
EXPORT_SYMBOL(__memcg_kmem_get_cache);

/*
 * We need to verify if the allocation against current->mm->owner's memcg is
 * possible for the given order. But the page is not allocated yet, so we'll
 * need a further commit step to do the final arrangements.
 *
 * It is possible for the task to switch cgroups in the meantime, so at
 * commit time, we can't rely on task conversion any longer.  We'll then use
 * the handle argument to return to the caller which cgroup we should commit
 * against. We could also return the memcg directly and avoid the pointer
 * passing, but a boolean return value gives better semantics considering
 * the compiled-out case as well.
 *
 * Returning true means the allocation is possible.
 */
bool
__memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order)
{
	struct mem_cgroup *memcg;
	int ret;

	*_memcg = NULL;

	/*
	 * Disabling accounting is only relevant for some specific memcg
	 * internal allocations. Therefore we would initially not have such
	 * check here, since direct calls to the page allocator that are marked
	 * with GFP_KMEMCG only happen outside memcg core. We are mostly
	 * concerned with cache allocations, and by having this test at
	 * memcg_kmem_get_cache, we are already able to relay the allocation to
	 * the root cache and bypass the memcg cache altogether.
	 *
	 * There is one exception, though: the SLUB allocator does not create
	 * large order caches, but rather service large kmallocs directly from
	 * the page allocator. Therefore, the following sequence when backed by
	 * the SLUB allocator:
	 *
	 * 	memcg_stop_kmem_account();
	 * 	kmalloc(<large_number>)
	 * 	memcg_resume_kmem_account();
	 *
	 * would effectively ignore the fact that we should skip accounting,
	 * since it will drive us directly to this function without passing
	 * through the cache selector memcg_kmem_get_cache. Such large
	 * allocations are extremely rare but can happen, for instance, for the
	 * cache arrays. We bring this test here.
	 */
	if (!current->mm || current->memcg_kmem_skip_account)
		return true;

	memcg = try_get_mem_cgroup_from_mm(current->mm);

	/*
	 * very rare case described in mem_cgroup_from_task. Unfortunately there
	 * isn't much we can do without complicating this too much, and it would
	 * be gfp-dependent anyway. Just let it go
	 */
	if (unlikely(!memcg))
		return true;

	if (!memcg_can_account_kmem(memcg)) {
		css_put(&memcg->css);
		return true;
	}

	ret = memcg_charge_kmem(memcg, gfp, PAGE_SIZE << order);
	if (!ret)
		*_memcg = memcg;

	css_put(&memcg->css);
	return (ret == 0);
}

void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg,
			      int order)
{
	struct page_cgroup *pc;

	VM_BUG_ON(mem_cgroup_is_root(memcg));

	/* The page allocation failed. Revert */
	if (!page) {
		memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
		return;
	}

	pc = lookup_page_cgroup(page);
	lock_page_cgroup(pc);
	pc->mem_cgroup = memcg;
	SetPageCgroupUsed(pc);
	unlock_page_cgroup(pc);
}

void __memcg_kmem_uncharge_pages(struct page *page, int order)
{
	struct mem_cgroup *memcg = NULL;
	struct page_cgroup *pc;


	pc = lookup_page_cgroup(page);
	/*
	 * Fast unlocked return. Theoretically might have changed, have to
	 * check again after locking.
	 */
	if (!PageCgroupUsed(pc))
		return;

	lock_page_cgroup(pc);
	if (PageCgroupUsed(pc)) {
		memcg = pc->mem_cgroup;
		ClearPageCgroupUsed(pc);
	}
	unlock_page_cgroup(pc);

	/*
	 * We trust that only if there is a memcg associated with the page, it
	 * is a valid allocation
	 */
	if (!memcg)
		return;

	VM_BUG_ON(mem_cgroup_is_root(memcg));
	memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
}
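
/*
 * Illustrative pairing (editorial sketch; the wrapper names used here live
 * outside this file and are assumptions, not definitions from this file).
 * A GFP_KMEMCG page allocation is expected to look roughly like:
 *
 *	if (!memcg_kmem_newpage_charge(gfp, &memcg, order))
 *		return NULL;
 *	page = alloc_pages(gfp, order);
 *	memcg_kmem_commit_charge(page, memcg, order);	 (reverts if !page)
 *	...
 *	memcg_kmem_uncharge_pages(page, order);		 (when the pages are freed)
 */
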
#else
static inline void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
{
}
#endif /* CONFIG_MEMCG_KMEM */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define PCGF_NOCOPY_AT_SPLIT (1 << PCG_LOCK | 1 << PCG_MIGRATION)
/*
 * Because tail pages are not marked as "used", set it. We're under
 * zone->lru_lock, 'splitting on pmd' and compound_lock.
 * charge/uncharge will never happen and move_account() is done under
 * compound_lock(), so we don't have to take care of races.
 */
void mem_cgroup_split_huge_fixup(struct page *head)
{
	struct page_cgroup *head_pc = lookup_page_cgroup(head);
	struct page_cgroup *pc;
	struct mem_cgroup *memcg;
	int i;

	if (mem_cgroup_disabled())
		return;

	memcg = head_pc->mem_cgroup;
	for (i = 1; i < HPAGE_PMD_NR; i++) {
		pc = head_pc + i;
		pc->mem_cgroup = memcg;
		smp_wmb();/* see __commit_charge() */
		pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
	}
	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
		       HPAGE_PMD_NR);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/**
 * mem_cgroup_move_account - move account of the page
 * @page: the page
 * @nr_pages: number of regular pages (>1 for huge pages)
 * @pc:	page_cgroup of the page.
 * @from: mem_cgroup which the page is moved from.
 * @to:	mem_cgroup which the page is moved to. @from != @to.
 *
 * The caller must confirm the following:
 * - page is not on LRU (isolate_page() is useful.)
 * - compound_lock is held when nr_pages > 1
 *
 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
 * from old cgroup.
 */
static int mem_cgroup_move_account(struct page *page,
				   unsigned int nr_pages,
				   struct page_cgroup *pc,
				   struct mem_cgroup *from,
				   struct mem_cgroup *to)
{
	unsigned long flags;
	int ret;
	bool anon = PageAnon(page);

	VM_BUG_ON(from == to);
	VM_BUG_ON(PageLRU(page));
	/*
	 * The page is isolated from LRU. So, collapse function
	 * will not handle this page. But page splitting can happen.
	 * Do this check under compound_page_lock(). The caller should
	 * hold it.
	 */
	ret = -EBUSY;
	if (nr_pages > 1 && !PageTransHuge(page))
		goto out;

	lock_page_cgroup(pc);

	ret = -EINVAL;
	if (!PageCgroupUsed(pc) || pc->mem_cgroup != from)
		goto unlock;

	move_lock_mem_cgroup(from, &flags);

	if (!anon && page_mapped(page)) {
		/* Update mapped_file data for mem_cgroup */
		preempt_disable();
		__this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
		__this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
		preempt_enable();
	}
	mem_cgroup_charge_statistics(from, page, anon, -nr_pages);

	/* caller should have done css_get */
	pc->mem_cgroup = to;
	mem_cgroup_charge_statistics(to, page, anon, nr_pages);
	move_unlock_mem_cgroup(from, &flags);
	ret = 0;
unlock:
	unlock_page_cgroup(pc);
	/*
	 * check events
	 */
	memcg_check_events(to, page);
	memcg_check_events(from, page);
out:
	return ret;
}

/**
 * mem_cgroup_move_parent - moves page to the parent group
 * @page: the page to move
 * @pc: page_cgroup of the page
 * @child: page's cgroup
 *
 * Move charges to its parent or the root cgroup if the group has no
 * parent (aka use_hierarchy==0).
 * Although this might fail (get_page_unless_zero, isolate_lru_page or
 * mem_cgroup_move_account fails) the failure is always temporary and
 * it signals a race with a page removal/uncharge or migration. In the
 * first case the page is on the way out and it will vanish from the LRU
 * on the next attempt and the call should be retried later.
 * Isolation from the LRU fails only if the page has been isolated from
 * the LRU since we looked at it and that usually means either global
 * reclaim or migration going on. The page will either get back to the
 * LRU or vanish.
 * Finally, mem_cgroup_move_account fails only if the page got uncharged
 * (!PageCgroupUsed) or moved to a different group. The page will
 * disappear in the next attempt.
 */
static int mem_cgroup_move_parent(struct page *page,
				  struct page_cgroup *pc,
				  struct mem_cgroup *child)
{
	struct mem_cgroup *parent;
	unsigned int nr_pages;
	unsigned long uninitialized_var(flags);
	int ret;

	VM_BUG_ON(mem_cgroup_is_root(child));

	ret = -EBUSY;
	if (!get_page_unless_zero(page))
		goto out;
	if (isolate_lru_page(page))
		goto put;

	nr_pages = hpage_nr_pages(page);

	parent = parent_mem_cgroup(child);
	/*
	 * If no parent, move charges to root cgroup.
	 */
	if (!parent)
		parent = root_mem_cgroup;

	if (nr_pages > 1) {
		VM_BUG_ON(!PageTransHuge(page));
		flags = compound_lock_irqsave(page);
	}

	ret = mem_cgroup_move_account(page, nr_pages,
				pc, child, parent);
	if (!ret)
		__mem_cgroup_cancel_local_charge(child, nr_pages);

	if (nr_pages > 1)
		compound_unlock_irqrestore(page, flags);
	putback_lru_page(page);
put:
	put_page(page);
out:
	return ret;
}

/*
 * Charge the memory controller for page usage.
 * Return
 * 0 if the charge was successful
 * < 0 if the cgroup is over its limit
 */
static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask, enum charge_type ctype)
{
	struct mem_cgroup *memcg = NULL;
	unsigned int nr_pages = 1;
	bool oom = true;
	int ret;

	if (PageTransHuge(page)) {
		nr_pages <<= compound_order(page);
		VM_BUG_ON(!PageTransHuge(page));
		/*
		 * Never OOM-kill a process for a huge page.  The
		 * fault handler will fall back to regular pages.
		 */
		oom = false;
	}

	ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
	if (ret == -ENOMEM)
		return ret;
	__mem_cgroup_commit_charge(memcg, page, nr_pages, ctype, false);
	return 0;
}

int mem_cgroup_newpage_charge(struct page *page,
			      struct mm_struct *mm, gfp_t gfp_mask)
{
	if (mem_cgroup_disabled())
		return 0;
	VM_BUG_ON(page_mapped(page));
	VM_BUG_ON(page->mapping && !PageAnon(page));
	VM_BUG_ON(!mm);
	return mem_cgroup_charge_common(page, mm, gfp_mask,
					MEM_CGROUP_CHARGE_TYPE_ANON);
}

/*
 * During swap-in (try_charge -> commit or cancel), the page is locked.
 * And when try_charge() successfully returns, one refcnt to memcg without
 * struct page_cgroup is acquired. This refcnt will be consumed by
 * "commit()" or removed by "cancel()".
 */
static int __mem_cgroup_try_charge_swapin(struct mm_struct *mm,
					  struct page *page,
					  gfp_t mask,
					  struct mem_cgroup **memcgp)
3679
{
3680
	struct mem_cgroup *memcg;
3681
	struct page_cgroup *pc;
3682
	int ret;
3683

3684 3685 3686 3687 3688 3689 3690 3691 3692 3693
	pc = lookup_page_cgroup(page);
	/*
	 * Every swap fault against a single page tries to charge the
	 * page, bail as early as possible.  shmem_unuse() encounters
	 * already charged pages, too.  The USED bit is protected by
	 * the page lock, which serializes swap cache removal, which
	 * in turn serializes uncharging.
	 */
	if (PageCgroupUsed(pc))
		return 0;
3694 3695
	if (!do_swap_account)
		goto charge_cur_mm;
3696 3697
	memcg = try_get_mem_cgroup_from_page(page);
	if (!memcg)
3698
		goto charge_cur_mm;
3699 3700
	*memcgp = memcg;
	ret = __mem_cgroup_try_charge(NULL, mask, 1, memcgp, true);
3701
	css_put(&memcg->css);
3702 3703
	if (ret == -EINTR)
		ret = 0;
3704
	return ret;
3705
charge_cur_mm:
3706 3707 3708 3709
	ret = __mem_cgroup_try_charge(mm, mask, 1, memcgp, true);
	if (ret == -EINTR)
		ret = 0;
	return ret;
3710 3711
}

3712 3713 3714 3715 3716 3717
int mem_cgroup_try_charge_swapin(struct mm_struct *mm, struct page *page,
				 gfp_t gfp_mask, struct mem_cgroup **memcgp)
{
	*memcgp = NULL;
	if (mem_cgroup_disabled())
		return 0;
3718 3719 3720 3721 3722 3723 3724 3725 3726 3727 3728 3729 3730 3731
	/*
	 * A racing thread's fault, or swapoff, may have already
	 * updated the pte, and even removed page from swap cache: in
	 * those cases unuse_pte()'s pte_same() test will fail; but
	 * there's also a KSM case which does need to charge the page.
	 */
	if (!PageSwapCache(page)) {
		int ret;

		ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, memcgp, true);
		if (ret == -EINTR)
			ret = 0;
		return ret;
	}
3732 3733 3734
	return __mem_cgroup_try_charge_swapin(mm, page, gfp_mask, memcgp);
}

3735 3736 3737 3738 3739 3740 3741 3742 3743
void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return;
	if (!memcg)
		return;
	__mem_cgroup_cancel_charge(memcg, 1);
}

3744
static void
3745
__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
3746
					enum charge_type ctype)
3747
{
3748
	if (mem_cgroup_disabled())
3749
		return;
3750
	if (!memcg)
3751
		return;
3752

3753
	__mem_cgroup_commit_charge(memcg, page, 1, ctype, true);
	/*
	 * Now swap is on-memory. This means this page may be
	 * counted both as mem and swap, i.e. double counted.
	 * Fix it by uncharging from memsw. Basically, this SwapCache is stable
	 * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page()
	 * may call delete_from_swap_cache() before we reach here.
	 */
3761
	if (do_swap_account && PageSwapCache(page)) {
3762
		swp_entry_t ent = {.val = page_private(page)};
3763
		mem_cgroup_uncharge_swap(ent);
3764
	}
3765 3766
}

3767 3768
void mem_cgroup_commit_charge_swapin(struct page *page,
				     struct mem_cgroup *memcg)
3769
{
3770
	__mem_cgroup_commit_charge_swapin(page, memcg,
3771
					  MEM_CGROUP_CHARGE_TYPE_ANON);
3772 3773
}

3774 3775
int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask)
3776
{
3777 3778 3779 3780
	struct mem_cgroup *memcg = NULL;
	enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
	int ret;

3781
	if (mem_cgroup_disabled())
3782 3783 3784 3785 3786 3787 3788
		return 0;
	if (PageCompound(page))
		return 0;

	if (!PageSwapCache(page))
		ret = mem_cgroup_charge_common(page, mm, gfp_mask, type);
	else { /* page is swapcache/shmem */
3789 3790
		ret = __mem_cgroup_try_charge_swapin(mm, page,
						     gfp_mask, &memcg);
3791 3792 3793 3794
		if (!ret)
			__mem_cgroup_commit_charge_swapin(page, memcg, type);
	}
	return ret;
3795 3796
}

3797
static void mem_cgroup_do_uncharge(struct mem_cgroup *memcg,
3798 3799
				   unsigned int nr_pages,
				   const enum charge_type ctype)
3800 3801 3802
{
	struct memcg_batch_info *batch = NULL;
	bool uncharge_memsw = true;
3803

	/* If swapout, usage of swap doesn't decrease */
	if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
		uncharge_memsw = false;

	batch = &current->memcg_batch;
	/*
	 * Usually, we do css_get() when we remember the memcg pointer.
	 * But in this case, we keep res->usage until the end of a series of
	 * uncharges. Then, it's ok to ignore memcg's refcnt.
	 */
	if (!batch->memcg)
		batch->memcg = memcg;
	/*
	 * do_batch > 0 when unmapping pages or inode invalidate/truncate.
	 * In those cases, all pages freed continuously can be expected to be in
	 * the same cgroup and we have a chance to coalesce uncharges.
	 * But we do uncharge one by one if this is killed by OOM(TIF_MEMDIE)
	 * because we want to do uncharge as soon as possible.
	 */

	if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
		goto direct_uncharge;

	if (nr_pages > 1)
		goto direct_uncharge;

	/*
	 * In the typical case, batch->memcg == mem. This means we can
	 * merge a series of uncharges to an uncharge of res_counter.
	 * If not, we uncharge res_counter one by one.
	 */
	if (batch->memcg != memcg)
		goto direct_uncharge;
	/* remember freed charge and uncharge it later */
3838
	batch->nr_pages++;
3839
	if (uncharge_memsw)
3840
		batch->memsw_nr_pages++;
3841 3842
	return;
direct_uncharge:
3843
	res_counter_uncharge(&memcg->res, nr_pages * PAGE_SIZE);
3844
	if (uncharge_memsw)
3845 3846 3847
		res_counter_uncharge(&memcg->memsw, nr_pages * PAGE_SIZE);
	if (unlikely(batch->memcg != memcg))
		memcg_oom_recover(memcg);
3848
}
3849

3850
/*
3851
 * uncharge if !page_mapped(page)
3852
 */
3853
static struct mem_cgroup *
3854 3855
__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype,
			     bool end_migration)
3856
{
3857
	struct mem_cgroup *memcg = NULL;
3858 3859
	unsigned int nr_pages = 1;
	struct page_cgroup *pc;
3860
	bool anon;
3861

3862
	if (mem_cgroup_disabled())
3863
		return NULL;
3864

3865
	if (PageTransHuge(page)) {
3866
		nr_pages <<= compound_order(page);
3867 3868
		VM_BUG_ON(!PageTransHuge(page));
	}
3869
	/*
3870
	 * Check if our page_cgroup is valid
3871
	 */
3872
	pc = lookup_page_cgroup(page);
3873
	if (unlikely(!PageCgroupUsed(pc)))
3874
		return NULL;
3875

3876
	lock_page_cgroup(pc);
3877

3878
	memcg = pc->mem_cgroup;
3879

3880 3881 3882
	if (!PageCgroupUsed(pc))
		goto unlock_out;

3883 3884
	anon = PageAnon(page);

3885
	switch (ctype) {
3886
	case MEM_CGROUP_CHARGE_TYPE_ANON:
3887 3888 3889 3890 3891
		/*
		 * Generally PageAnon tells if it's the anon statistics to be
		 * updated; but sometimes e.g. mem_cgroup_uncharge_page() is
		 * used before page reached the stage of being marked PageAnon.
		 */
3892 3893
		anon = true;
		/* fallthrough */
3894
	case MEM_CGROUP_CHARGE_TYPE_DROP:
3895
		/* See mem_cgroup_prepare_migration() */
3896 3897 3898 3899 3900 3901 3902 3903 3904 3905
		if (page_mapped(page))
			goto unlock_out;
		/*
		 * Pages under migration may not be uncharged.  But
		 * end_migration() /must/ be the one uncharging the
		 * unused post-migration page and so it has to call
		 * here with the migration bit still set.  See the
		 * res_counter handling below.
		 */
		if (!end_migration && PageCgroupMigration(pc))
3906 3907 3908 3909 3910 3911 3912 3913 3914 3915 3916
			goto unlock_out;
		break;
	case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
		if (!PageAnon(page)) {	/* Shared memory */
			if (page->mapping && !page_is_file_cache(page))
				goto unlock_out;
		} else if (page_mapped(page)) /* Anon */
				goto unlock_out;
		break;
	default:
		break;
3917
	}
3918

3919
	mem_cgroup_charge_statistics(memcg, page, anon, -nr_pages);
3920

3921
	ClearPageCgroupUsed(pc);
3922 3923 3924 3925 3926 3927
	/*
	 * pc->mem_cgroup is not cleared here. It will be accessed when it's
	 * freed from LRU. This is safe because uncharged page is expected not
	 * to be reused (freed soon). Exception is SwapCache, it's handled by
	 * special functions.
	 */
3928

3929
	unlock_page_cgroup(pc);
3930
	/*
	 * Even after unlock, we have memcg->res.usage here and this memcg
	 * will never be freed, so it's safe to call css_get().
	 */
3934
	memcg_check_events(memcg, page);
3935
	if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
3936
		mem_cgroup_swap_statistics(memcg, true);
3937
		css_get(&memcg->css);
3938
	}
3939 3940 3941 3942 3943 3944
	/*
	 * Migration does not charge the res_counter for the
	 * replacement page, so leave it alone when phasing out the
	 * page that is unused after the migration.
	 */
	if (!end_migration && !mem_cgroup_is_root(memcg))
3945
		mem_cgroup_do_uncharge(memcg, nr_pages, ctype);
3946

3947
	return memcg;
3948 3949 3950

unlock_out:
	unlock_page_cgroup(pc);
3951
	return NULL;
3952 3953
}

3954 3955
void mem_cgroup_uncharge_page(struct page *page)
{
3956 3957 3958
	/* early check. */
	if (page_mapped(page))
		return;
3959
	VM_BUG_ON(page->mapping && !PageAnon(page));
3960 3961 3962 3963 3964 3965 3966 3967 3968 3969 3970 3971
	/*
	 * If the page is in swap cache, uncharge should be deferred
	 * to the swap path, which also properly accounts swap usage
	 * and handles memcg lifetime.
	 *
	 * Note that this check is not stable and reclaim may add the
	 * page to swap cache at any time after this.  However, if the
	 * page is not in swap cache by the time page->mapcount hits
	 * 0, there won't be any page table references to the swap
	 * slot, and reclaim will free it and not actually write the
	 * page to disk.
	 */
3972 3973
	if (PageSwapCache(page))
		return;
3974
	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_ANON, false);
3975 3976 3977 3978 3979
}

void mem_cgroup_uncharge_cache_page(struct page *page)
{
	VM_BUG_ON(page_mapped(page));
3980
	VM_BUG_ON(page->mapping);
3981
	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE, false);
3982 3983
}

/*
 * Batch_start/batch_end is called in unmap_page_range/invalidate/truncate.
 * In those cases, pages are freed continuously and we can expect pages
 * are in the same memcg. Each of these calls itself limits the number of
 * pages freed at once, so uncharge_start/end() is called properly.
 * This may be called several times in one context (nesting is allowed).
 */

void mem_cgroup_uncharge_start(void)
{
	current->memcg_batch.do_batch++;
	/* We can do nest. */
	if (current->memcg_batch.do_batch == 1) {
		current->memcg_batch.memcg = NULL;
		current->memcg_batch.nr_pages = 0;
		current->memcg_batch.memsw_nr_pages = 0;
	}
}

void mem_cgroup_uncharge_end(void)
{
	struct memcg_batch_info *batch = &current->memcg_batch;

	if (!batch->do_batch)
		return;

	batch->do_batch--;
	if (batch->do_batch) /* If stacked, do nothing. */
		return;

	if (!batch->memcg)
		return;
	/*
	 * This "batch->memcg" is valid without any css_get/put etc...
	 * because we hide charges behind us.
	 */
	if (batch->nr_pages)
		res_counter_uncharge(&batch->memcg->res,
				     batch->nr_pages * PAGE_SIZE);
	if (batch->memsw_nr_pages)
		res_counter_uncharge(&batch->memcg->memsw,
				     batch->memsw_nr_pages * PAGE_SIZE);
	memcg_oom_recover(batch->memcg);
	/* forget this pointer (for sanity check) */
	batch->memcg = NULL;
}
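
/*
 * Illustrative usage (editorial sketch, not a real call site): a caller
 * that frees many pages from the same memcg batches the res_counter
 * updates like this:
 *
 *	mem_cgroup_uncharge_start();
 *	for each page being released
 *		mem_cgroup_uncharge_page(page);
 *	mem_cgroup_uncharge_end();
 */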

#ifdef CONFIG_SWAP
/*
 * Called after __delete_from_swap_cache() and after the "page" account has
 * been dropped; memcg information is recorded to the swap_cgroup of "ent".
 */
void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
{
	struct mem_cgroup *memcg;
	int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;

	if (!swapout) /* this was a swap cache but the swap is unused ! */
		ctype = MEM_CGROUP_CHARGE_TYPE_DROP;

	memcg = __mem_cgroup_uncharge_common(page, ctype, false);

	/*
	 * Record memcg information: if swapout && memcg != NULL,
	 * css_get() was called in uncharge().
	 */
	if (do_swap_account && swapout && memcg)
		swap_cgroup_record(ent, css_id(&memcg->css));
}
#endif

4056
#ifdef CONFIG_MEMCG_SWAP
/*
 * called from swap_entry_free(). remove record in swap_cgroup and
 * uncharge "memsw" account.
 */
void mem_cgroup_uncharge_swap(swp_entry_t ent)
4062
{
4063
	struct mem_cgroup *memcg;
4064
	unsigned short id;
4065 4066 4067 4068

	if (!do_swap_account)
		return;

4069 4070 4071
	id = swap_cgroup_record(ent, 0);
	rcu_read_lock();
	memcg = mem_cgroup_lookup(id);
4072
	if (memcg) {
4073 4074 4075 4076
		/*
		 * We uncharge this because swap is freed.
		 * This memcg can be obsolete one. We avoid calling css_tryget
		 */
4077
		if (!mem_cgroup_is_root(memcg))
4078
			res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
4079
		mem_cgroup_swap_statistics(memcg, false);
4080
		css_put(&memcg->css);
4081
	}
4082
	rcu_read_unlock();
4083
}

/**
 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
 * @entry: swap entry to be moved
 * @from:  mem_cgroup which the entry is moved from
 * @to:  mem_cgroup which the entry is moved to
 *
 * It succeeds only when the swap_cgroup's record for this entry is the same
 * as the mem_cgroup's id of @from.
 *
 * Returns 0 on success, -EINVAL on failure.
 *
 * The caller must have charged to @to, IOW, called res_counter_charge() about
 * both res and memsw, and called css_get().
 */
static int mem_cgroup_move_swap_account(swp_entry_t entry,
4100
				struct mem_cgroup *from, struct mem_cgroup *to)
{
	unsigned short old_id, new_id;

	old_id = css_id(&from->css);
	new_id = css_id(&to->css);

	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
		mem_cgroup_swap_statistics(from, false);
4109
		mem_cgroup_swap_statistics(to, true);
4110
		/*
		 * This function is only called from task migration context now.
		 * It postpones res_counter and refcount handling till the end
		 * of task migration (mem_cgroup_clear_mc()) for performance
		 * improvement. But we cannot postpone css_get(to) because if
		 * the process that has been moved to @to does swap-in, the
		 * refcount of @to might be decreased to 0.
		 *
		 * We are in attach() phase, so the cgroup is guaranteed to be
		 * alive, so we can just call css_get().
		 */
4121
		css_get(&to->css);
4122 4123 4124 4125 4126 4127
		return 0;
	}
	return -EINVAL;
}
#else
static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
4128
				struct mem_cgroup *from, struct mem_cgroup *to)
4129 4130 4131
{
	return -EINVAL;
}
4132
#endif
4133

4134
/*
4135 4136
 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
 * page belongs to.
4137
 */
4138 4139
void mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
				  struct mem_cgroup **memcgp)
4140
{
4141
	struct mem_cgroup *memcg = NULL;
4142
	unsigned int nr_pages = 1;
4143
	struct page_cgroup *pc;
4144
	enum charge_type ctype;
4145

4146
	*memcgp = NULL;
4147

4148
	if (mem_cgroup_disabled())
4149
		return;
4150

4151 4152 4153
	if (PageTransHuge(page))
		nr_pages <<= compound_order(page);

4154 4155 4156
	pc = lookup_page_cgroup(page);
	lock_page_cgroup(pc);
	if (PageCgroupUsed(pc)) {
4157 4158
		memcg = pc->mem_cgroup;
		css_get(&memcg->css);
		/*
		 * When migrating an anonymous page, its mapcount goes down
		 * to 0 and uncharge() will be called. But, even if it's fully
		 * unmapped, migration may fail and this page has to be
		 * charged again. We set the MIGRATION flag here and delay
		 * uncharge until end_migration() is called.
		 *
		 * Corner Case Thinking
		 * A)
		 * When the old page was mapped as Anon and it's unmapped and
		 * freed while migration was ongoing.
		 * If unmap finds the old page, uncharge() of it will be delayed
		 * until end_migration(). If unmap finds a new page, it's
		 * uncharged when its mapcount goes from 1 to 0. If the unmap
		 * code finds a swap_migration_entry, the new page will not be
		 * mapped and end_migration() will find it (mapcount == 0).
		 *
		 * B)
		 * When the old page was mapped but migration fails, the kernel
		 * remaps it. A charge for it is kept by the MIGRATION flag even
		 * if mapcount goes down to 0. We can do the remap successfully
		 * without charging it again.
		 *
		 * C)
		 * The "old" page is under lock_page() until the end of
		 * migration, so the old page itself will not be swapped out.
		 * If the new page is swapped out before end_migration(), our
		 * hook to the usual swap-out path will catch the event.
		 */
		if (PageAnon(page))
			SetPageCgroupMigration(pc);
	}
4191
	unlock_page_cgroup(pc);
4192 4193 4194 4195
	/*
	 * If the page is not charged at this point,
	 * we return here.
	 */
4196
	if (!memcg)
4197
		return;
4198

4199
	*memcgp = memcg;
4200 4201 4202 4203 4204 4205 4206
	/*
	 * We charge new page before it's used/mapped. So, even if unlock_page()
	 * is called before end_migration, we can catch all events on this new
	 * page. In the case new page is migrated but not remapped, new page's
	 * mapcount will be finally 0 and we call uncharge in end_migration().
	 */
	if (PageAnon(page))
4207
		ctype = MEM_CGROUP_CHARGE_TYPE_ANON;
4208
	else
4209
		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
4210 4211 4212 4213 4214
	/*
	 * The page is committed to the memcg, but it's not actually
	 * charged to the res_counter since we plan on replacing the
	 * old one and only one page is going to be left afterwards.
	 */
4215
	__mem_cgroup_commit_charge(memcg, newpage, nr_pages, ctype, false);
4216
}
4217

4218
/* remove redundant charge if migration failed*/
4219
void mem_cgroup_end_migration(struct mem_cgroup *memcg,
4220
	struct page *oldpage, struct page *newpage, bool migration_ok)
4221
{
4222
	struct page *used, *unused;
4223
	struct page_cgroup *pc;
4224
	bool anon;
4225

4226
	if (!memcg)
4227
		return;
4228

4229
	if (!migration_ok) {
4230 4231
		used = oldpage;
		unused = newpage;
4232
	} else {
4233
		used = newpage;
4234 4235
		unused = oldpage;
	}
4236
	anon = PageAnon(used);
4237 4238 4239 4240
	__mem_cgroup_uncharge_common(unused,
				     anon ? MEM_CGROUP_CHARGE_TYPE_ANON
				     : MEM_CGROUP_CHARGE_TYPE_CACHE,
				     true);
4241
	css_put(&memcg->css);
4242
	/*
	 * We disallowed uncharge of pages under migration because mapcount
	 * of the page goes down to zero, temporarily.
	 * Clear the flag and check whether the page should be charged.
	 */
4247 4248 4249 4250 4251
	pc = lookup_page_cgroup(oldpage);
	lock_page_cgroup(pc);
	ClearPageCgroupMigration(pc);
	unlock_page_cgroup(pc);

4252
	/*
	 * If a page is a file cache, radix-tree replacement is very atomic
	 * and we can skip this check. When it was an Anon page, its mapcount
	 * goes down to 0. But because we added the MIGRATION flag, it's not
	 * uncharged yet. There are several cases but the page->mapcount check
	 * and the USED bit check in mem_cgroup_uncharge_page() will do enough
	 * checking. (see prepare_charge() also)
	 */
4260
	if (anon)
4261
		mem_cgroup_uncharge_page(used);
4262
}
4263

/*
 * At page cache replacement, newpage is not under any memcg but it's on
 * the LRU. So, this function doesn't touch res_counter but handles the LRU
 * in the correct way. Both pages are locked so we cannot race with uncharge.
 */
void mem_cgroup_replace_page_cache(struct page *oldpage,
				  struct page *newpage)
{
4272
	struct mem_cgroup *memcg = NULL;
4273 4274 4275 4276 4277 4278 4279 4280 4281
	struct page_cgroup *pc;
	enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;

	if (mem_cgroup_disabled())
		return;

	pc = lookup_page_cgroup(oldpage);
	/* fix accounting on old pages */
	lock_page_cgroup(pc);
4282 4283
	if (PageCgroupUsed(pc)) {
		memcg = pc->mem_cgroup;
4284
		mem_cgroup_charge_statistics(memcg, oldpage, false, -1);
4285 4286
		ClearPageCgroupUsed(pc);
	}
4287 4288
	unlock_page_cgroup(pc);

4289 4290 4291 4292 4293 4294
	/*
	 * When called from shmem_replace_page(), in some cases the
	 * oldpage has already been charged, and in some cases not.
	 */
	if (!memcg)
		return;
4295 4296 4297 4298 4299
	/*
	 * Even if newpage->mapping was NULL before starting replacement,
	 * the newpage may be on LRU(or pagevec for LRU) already. We lock
	 * LRU while we overwrite pc->mem_cgroup.
	 */
4300
	__mem_cgroup_commit_charge(memcg, newpage, 1, type, true);
4301 4302
}

4303 4304 4305 4306 4307 4308
#ifdef CONFIG_DEBUG_VM
static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
{
	struct page_cgroup *pc;

	pc = lookup_page_cgroup(page);
4309 4310 4311 4312 4313
	/*
	 * Can be NULL while feeding pages into the page allocator for
	 * the first time, i.e. during boot or memory hotplug;
	 * or when mem_cgroup_disabled().
	 */
4314 4315 4316 4317 4318 4319 4320 4321 4322 4323 4324 4325 4326 4327 4328 4329 4330 4331 4332
	if (likely(pc) && PageCgroupUsed(pc))
		return pc;
	return NULL;
}

bool mem_cgroup_bad_page_check(struct page *page)
{
	if (mem_cgroup_disabled())
		return false;

	return lookup_page_cgroup_used(page) != NULL;
}

void mem_cgroup_print_bad_page(struct page *page)
{
	struct page_cgroup *pc;

	pc = lookup_page_cgroup_used(page);
	if (pc) {
4333 4334
		pr_alert("pc:%p pc->flags:%lx pc->mem_cgroup:%p\n",
			 pc, pc->flags, pc->mem_cgroup);
4335 4336 4337 4338
	}
}
#endif

4339
static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
4340
				unsigned long long val)
4341
{
4342
	int retry_count;
4343
	u64 memswlimit, memlimit;
4344
	int ret = 0;
4345 4346
	int children = mem_cgroup_count_children(memcg);
	u64 curusage, oldusage;
4347
	int enlarge;
4348 4349 4350 4351 4352 4353 4354 4355 4356

	/*
	 * For keeping hierarchical_reclaim simple, how long we should retry
	 * depends on the caller. We set our retry count to be a function
	 * of the number of children which we should visit in this loop.
	 */
	retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;

	oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
4357

4358
	enlarge = 0;
4359
	while (retry_count) {
4360 4361 4362 4363
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
4364 4365 4366
		/*
		 * Rather than hide all in some function, I do this in
		 * open coded manner. You see what this really does.
4367
		 * We have to guarantee memcg->res.limit <= memcg->memsw.limit.
4368 4369 4370 4371 4372 4373
		 */
		mutex_lock(&set_limit_mutex);
		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
		if (memswlimit < val) {
			ret = -EINVAL;
			mutex_unlock(&set_limit_mutex);
4374 4375
			break;
		}
4376 4377 4378 4379 4380

		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
		if (memlimit < val)
			enlarge = 1;

4381
		ret = res_counter_set_limit(&memcg->res, val);
4382 4383 4384 4385 4386 4387
		if (!ret) {
			if (memswlimit == val)
				memcg->memsw_is_minimum = true;
			else
				memcg->memsw_is_minimum = false;
		}
4388 4389 4390 4391 4392
		mutex_unlock(&set_limit_mutex);

		if (!ret)
			break;

4393 4394
		mem_cgroup_reclaim(memcg, GFP_KERNEL,
				   MEM_CGROUP_RECLAIM_SHRINK);
4395 4396 4397 4398 4399 4400
		curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
		/* Usage is reduced ? */
  		if (curusage >= oldusage)
			retry_count--;
		else
			oldusage = curusage;
4401
	}
4402 4403
	if (!ret && enlarge)
		memcg_oom_recover(memcg);
4404

4405 4406 4407
	return ret;
}

4408 4409
static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
					unsigned long long val)
4410
{
4411
	int retry_count;
4412
	u64 memlimit, memswlimit, oldusage, curusage;
4413 4414
	int children = mem_cgroup_count_children(memcg);
	int ret = -EBUSY;
4415
	int enlarge = 0;
4416

4417 4418 4419
	/* see mem_cgroup_resize_res_limit */
 	retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
	oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
4420 4421 4422 4423 4424 4425 4426 4427
	while (retry_count) {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		/*
		 * Rather than hide all in some function, I do this in
		 * open coded manner. You see what this really does.
4428
		 * We have to guarantee memcg->res.limit <= memcg->memsw.limit.
4429 4430 4431 4432 4433 4434 4435 4436
		 */
		mutex_lock(&set_limit_mutex);
		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
		if (memlimit > val) {
			ret = -EINVAL;
			mutex_unlock(&set_limit_mutex);
			break;
		}
4437 4438 4439
		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
		if (memswlimit < val)
			enlarge = 1;
4440
		ret = res_counter_set_limit(&memcg->memsw, val);
4441 4442 4443 4444 4445 4446
		if (!ret) {
			if (memlimit == val)
				memcg->memsw_is_minimum = true;
			else
				memcg->memsw_is_minimum = false;
		}
4447 4448 4449 4450 4451
		mutex_unlock(&set_limit_mutex);

		if (!ret)
			break;

4452 4453 4454
		mem_cgroup_reclaim(memcg, GFP_KERNEL,
				   MEM_CGROUP_RECLAIM_NOSWAP |
				   MEM_CGROUP_RECLAIM_SHRINK);
4455
		curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
4456
		/* Usage is reduced ? */
4457
		if (curusage >= oldusage)
4458
			retry_count--;
4459 4460
		else
			oldusage = curusage;
4461
	}
4462 4463
	if (!ret && enlarge)
		memcg_oom_recover(memcg);
4464 4465 4466
	return ret;
}

/**
 * mem_cgroup_force_empty_list - clears LRU of a group
 * @memcg: group to clear
 * @node: NUMA node
 * @zid: zone id
 * @lru: lru to clear
 *
 * Traverse a specified page_cgroup list and try to drop them all.  This doesn't
 * reclaim the pages themselves - pages are moved to the parent (or root)
 * group.
 */
4478
static void mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
4479
				int node, int zid, enum lru_list lru)
4480
{
4481
	struct lruvec *lruvec;
4482
	unsigned long flags;
4483
	struct list_head *list;
4484 4485
	struct page *busy;
	struct zone *zone;
4486

4487
	zone = &NODE_DATA(node)->node_zones[zid];
4488 4489
	lruvec = mem_cgroup_zone_lruvec(zone, memcg);
	list = &lruvec->lists[lru];
4490

4491
	busy = NULL;
4492
	do {
4493
		struct page_cgroup *pc;
4494 4495
		struct page *page;

4496
		spin_lock_irqsave(&zone->lru_lock, flags);
4497
		if (list_empty(list)) {
4498
			spin_unlock_irqrestore(&zone->lru_lock, flags);
4499
			break;
4500
		}
4501 4502 4503
		page = list_entry(list->prev, struct page, lru);
		if (busy == page) {
			list_move(&page->lru, list);
4504
			busy = NULL;
4505
			spin_unlock_irqrestore(&zone->lru_lock, flags);
4506 4507
			continue;
		}
4508
		spin_unlock_irqrestore(&zone->lru_lock, flags);
4509

4510
		pc = lookup_page_cgroup(page);
4511

4512
		if (mem_cgroup_move_parent(page, pc, memcg)) {
4513
			/* found lock contention or "pc" is obsolete. */
4514
			busy = page;
4515 4516 4517
			cond_resched();
		} else
			busy = NULL;
4518
	} while (!list_empty(list));
4519 4520 4521
}

/*
 * Make the mem_cgroup's charge 0 if there is no task by moving
 * all the charges and pages to the parent.
 * This enables deleting this mem_cgroup.
 *
 * Caller is responsible for holding css reference on the memcg.
 */
4528
static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg)
4529
{
4530
	int node, zid;
4531
	u64 usage;
4532

4533
	do {
4534 4535
		/* This is for making all *used* pages to be on LRU. */
		lru_add_drain_all();
4536 4537
		drain_all_stock_sync(memcg);
		mem_cgroup_start_move(memcg);
4538
		for_each_node_state(node, N_MEMORY) {
4539
			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
4540 4541
				enum lru_list lru;
				for_each_lru(lru) {
4542
					mem_cgroup_force_empty_list(memcg,
H
Hugh Dickins 已提交
4543
							node, zid, lru);
4544
				}
4545
			}
4546
		}
4547 4548
		mem_cgroup_end_move(memcg);
		memcg_oom_recover(memcg);
4549
		cond_resched();
4550

		/*
		 * Kernel memory may not necessarily be trackable to a specific
		 * process, so such pages are not migrated, and therefore we
		 * can't expect their value to drop to 0 here.
		 * Having res filled up with kmem only is enough.
		 *
		 * This is a safety check because mem_cgroup_force_empty_list
		 * could have raced with mem_cgroup_replace_page_cache callers
		 * so the lru seemed empty but the page could have been added
		 * right after the check. RES_USAGE should be safe as we always
		 * charge before adding to the LRU.
		 */
		usage = res_counter_read_u64(&memcg->res, RES_USAGE) -
			res_counter_read_u64(&memcg->kmem, RES_USAGE);
	} while (usage > 0);
4566 4567
}

4568 4569 4570 4571 4572 4573 4574
/*
 * This mainly exists for tests during the setting of set of use_hierarchy.
 * Since this is the very setting we are changing, the current hierarchy value
 * is meaningless
 */
static inline bool __memcg_has_children(struct mem_cgroup *memcg)
{
4575
	struct cgroup_subsys_state *pos;
4576 4577

	/* bounce at first found */
4578
	css_for_each_child(pos, &memcg->css)
4579 4580 4581 4582 4583
		return true;
	return false;
}

/*
4584 4585
 * Must be called with memcg_create_mutex held, unless the cgroup is guaranteed
 * to be already dead (as in mem_cgroup_force_empty, for instance).  This is
4586 4587 4588 4589 4590 4591 4592 4593 4594
 * from mem_cgroup_count_children(), in the sense that we don't really care how
 * many children we have; we only need to know if we have any.  It also counts
 * any memcg without hierarchy as infertile.
 */
static inline bool memcg_has_children(struct mem_cgroup *memcg)
{
	return memcg->use_hierarchy && __memcg_has_children(memcg);
}

4595 4596 4597 4598 4599 4600 4601 4602 4603 4604
/*
 * Reclaims as many pages from the given memcg as possible and moves
 * the rest to the parent.
 *
 * Caller is responsible for holding css reference for memcg.
 */
static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
{
	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct cgroup *cgrp = memcg->css.cgroup;
4605

4606
	/* returns EBUSY if there is a task or if we come here twice. */
4607 4608 4609
	if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
		return -EBUSY;

4610 4611
	/* we call try-to-free pages for make this cgroup empty */
	lru_add_drain_all();
4612
	/* try to free all pages in this cgroup */
4613
	while (nr_retries && res_counter_read_u64(&memcg->res, RES_USAGE) > 0) {
4614
		int progress;
4615

4616 4617 4618
		if (signal_pending(current))
			return -EINTR;

4619
		progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL,
4620
						false);
4621
		if (!progress) {
4622
			nr_retries--;
4623
			/* maybe some writeback is necessary */
4624
			congestion_wait(BLK_RW_ASYNC, HZ/10);
4625
		}
4626 4627

	}
K
KAMEZAWA Hiroyuki 已提交
4628
	lru_add_drain();
4629 4630 4631
	mem_cgroup_reparent_charges(memcg);

	return 0;
4632 4633
}

static int mem_cgroup_force_empty_write(struct cgroup_subsys_state *css,
					unsigned int event)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	if (mem_cgroup_is_root(memcg))
		return -EINVAL;
	return mem_cgroup_force_empty(memcg);
}
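
/*
 * Illustrative usage (sketch, assuming the standard memcg control files):
 * writing to memory.force_empty, e.g.
 *
 *	echo 0 > /sys/fs/cgroup/memory/<group>/memory.force_empty
 *
 * ends up in mem_cgroup_force_empty_write() above, which reclaims what it
 * can and reparents the remaining charges.
 */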

static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
				     struct cftype *cft)
4646
{
4647
	return mem_cgroup_from_css(css)->use_hierarchy;
4648 4649
}

4650 4651
static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
				      struct cftype *cft, u64 val)
4652 4653
{
	int retval = 0;
4654
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4655
	struct mem_cgroup *parent_memcg = mem_cgroup_from_css(css_parent(&memcg->css));
4656

4657
	mutex_lock(&memcg_create_mutex);
4658 4659 4660 4661

	if (memcg->use_hierarchy == val)
		goto out;

	/*
	 * If the parent's use_hierarchy is set, we can't make any modifications
	 * in the child subtrees. If it is unset, then the change can
	 * occur, provided the current cgroup has no children.
	 *
	 * For the root cgroup, parent_memcg is NULL; we allow the value to be
	 * set if there are no children.
	 */
	if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
				(val == 1 || val == 0)) {
		if (!__memcg_has_children(memcg))
			memcg->use_hierarchy = val;
		else
			retval = -EBUSY;
	} else
		retval = -EINVAL;

out:
	mutex_unlock(&memcg_create_mutex);

	return retval;
}
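
/*
 * Illustrative usage (sketch, assuming the standard memcg control files):
 * hierarchy mode is toggled per group with
 *
 *	echo 1 > /sys/fs/cgroup/memory/<group>/memory.use_hierarchy
 *
 * and, per the checks above, the write is rejected once the group already
 * has children or when the parent itself has use_hierarchy set.
 */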


4686
static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg,
4687
					       enum mem_cgroup_stat_index idx)
4688
{
4689
	struct mem_cgroup *iter;
4690
	long val = 0;
4691

4692
	/* Per-cpu values can be negative, use a signed accumulator */
4693
	for_each_mem_cgroup_tree(iter, memcg)
4694 4695 4696 4697 4698
		val += mem_cgroup_read_stat(iter, idx);

	if (val < 0) /* race ? */
		val = 0;
	return val;
4699 4700
}

4701
static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
4702
{
4703
	u64 val;
4704

4705
	if (!mem_cgroup_is_root(memcg)) {
4706
		if (!swap)
4707
			return res_counter_read_u64(&memcg->res, RES_USAGE);
4708
		else
4709
			return res_counter_read_u64(&memcg->memsw, RES_USAGE);
4710 4711
	}

4712 4713 4714 4715
	/*
	 * Transparent hugepages are still accounted for in MEM_CGROUP_STAT_RSS
	 * as well as in MEM_CGROUP_STAT_RSS_HUGE.
	 */
4716 4717
	val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE);
	val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS);
4718

4719
	if (swap)
4720
		val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAP);
4721 4722 4723 4724

	return val << PAGE_SHIFT;
}

4725 4726 4727
static ssize_t mem_cgroup_read(struct cgroup_subsys_state *css,
			       struct cftype *cft, struct file *file,
			       char __user *buf, size_t nbytes, loff_t *ppos)
4728
{
4729
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4730
	char str[64];
4731
	u64 val;
4732 4733
	int name, len;
	enum res_type type;
4734 4735 4736

	type = MEMFILE_TYPE(cft->private);
	name = MEMFILE_ATTR(cft->private);
4737

4738 4739
	switch (type) {
	case _MEM:
4740
		if (name == RES_USAGE)
4741
			val = mem_cgroup_usage(memcg, false);
4742
		else
4743
			val = res_counter_read_u64(&memcg->res, name);
4744 4745
		break;
	case _MEMSWAP:
4746
		if (name == RES_USAGE)
4747
			val = mem_cgroup_usage(memcg, true);
4748
		else
4749
			val = res_counter_read_u64(&memcg->memsw, name);
4750
		break;
4751 4752 4753
	case _KMEM:
		val = res_counter_read_u64(&memcg->kmem, name);
		break;
4754 4755 4756
	default:
		BUG();
	}
4757 4758 4759

	len = scnprintf(str, sizeof(str), "%llu\n", (unsigned long long)val);
	return simple_read_from_buffer(buf, nbytes, ppos, str, len);
4760
}
4761

4762
static int memcg_update_kmem_limit(struct cgroup_subsys_state *css, u64 val)
4763 4764 4765
{
	int ret = -EINVAL;
#ifdef CONFIG_MEMCG_KMEM
4766
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	/*
	 * For simplicity, we won't allow this to be disabled.  It also can't
	 * be changed if the cgroup has children already, or if tasks had
	 * already joined.
	 *
	 * If tasks join before we set the limit, a person looking at
	 * kmem.usage_in_bytes will have no way to determine when it took
	 * place, which makes the value quite meaningless.
	 *
	 * After it first became limited, changes in the value of the limit are
	 * of course permitted.
	 */
	mutex_lock(&memcg_create_mutex);
4780 4781
	mutex_lock(&set_limit_mutex);
	if (!memcg->kmem_account_flags && val != RESOURCE_MAX) {
4782
		if (cgroup_task_count(css->cgroup) || memcg_has_children(memcg)) {
4783 4784 4785 4786 4787 4788
			ret = -EBUSY;
			goto out;
		}
		ret = res_counter_set_limit(&memcg->kmem, val);
		VM_BUG_ON(ret);

4789 4790 4791 4792 4793
		ret = memcg_update_cache_sizes(memcg);
		if (ret) {
			res_counter_set_limit(&memcg->kmem, RESOURCE_MAX);
			goto out;
		}
4794 4795 4796 4797 4798 4799
		static_key_slow_inc(&memcg_kmem_enabled_key);
		/*
		 * setting the active bit after the inc will guarantee no one
		 * starts accounting before all call sites are patched
		 */
		memcg_kmem_set_active(memcg);
	} else
		ret = res_counter_set_limit(&memcg->kmem, val);
out:
	mutex_unlock(&set_limit_mutex);
	mutex_unlock(&memcg_create_mutex);
#endif
	return ret;
}
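
/*
 * Illustrative usage (sketch, assuming the standard memcg control files):
 * kmem accounting is switched on by setting a limit for the first time,
 * e.g.
 *
 *	echo 512M > /sys/fs/cgroup/memory/<group>/memory.kmem.limit_in_bytes
 *
 * which reaches memcg_update_kmem_limit() via mem_cgroup_write() below.
 */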

#ifdef CONFIG_MEMCG_KMEM
4810
static int memcg_propagate_kmem(struct mem_cgroup *memcg)
4811
{
4812
	int ret = 0;
4813 4814
	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
	if (!parent)
4815 4816
		goto out;

4817
	memcg->kmem_account_flags = parent->kmem_account_flags;
4818 4819 4820 4821 4822 4823 4824 4825 4826 4827
	/*
	 * When that happens, we need to disable the static branch only on those
	 * memcgs that enabled it. To achieve this, we would be forced to
	 * complicate the code by keeping track of which memcgs were the ones
	 * that actually enabled limits, and which ones got it from its
	 * parents.
	 *
	 * It is a lot simpler just to do static_key_slow_inc() on every child
	 * that is accounted.
	 */
4828 4829 4830 4831
	if (!memcg_kmem_is_active(memcg))
		goto out;

	/*
4832 4833 4834
	 * __mem_cgroup_free() will issue static_key_slow_dec() because this
	 * memcg is active already. If the later initialization fails then the
	 * cgroup core triggers the cleanup so we do not have to do it here.
4835 4836 4837 4838
	 */
	static_key_slow_inc(&memcg_kmem_enabled_key);

	mutex_lock(&set_limit_mutex);
4839
	memcg_stop_kmem_account();
4840
	ret = memcg_update_cache_sizes(memcg);
4841
	memcg_resume_kmem_account();
4842 4843 4844
	mutex_unlock(&set_limit_mutex);
out:
	return ret;
4845
}
4846
#endif /* CONFIG_MEMCG_KMEM */
4847

4848 4849 4850 4851
/*
 * The user of this function is...
 * RES_LIMIT.
 */
4852
static int mem_cgroup_write(struct cgroup_subsys_state *css, struct cftype *cft,
4853
			    const char *buffer)
4854
{
4855
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4856 4857
	enum res_type type;
	int name;
4858 4859 4860
	unsigned long long val;
	int ret;

4861 4862
	type = MEMFILE_TYPE(cft->private);
	name = MEMFILE_ATTR(cft->private);
4863

4864
	switch (name) {
4865
	case RES_LIMIT:
4866 4867 4868 4869
		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
			ret = -EINVAL;
			break;
		}
4870 4871
		/* This function does all necessary parse...reuse it */
		ret = res_counter_memparse_write_strategy(buffer, &val);
4872 4873 4874
		if (ret)
			break;
		if (type == _MEM)
4875
			ret = mem_cgroup_resize_limit(memcg, val);
4876
		else if (type == _MEMSWAP)
4877
			ret = mem_cgroup_resize_memsw_limit(memcg, val);
4878
		else if (type == _KMEM)
4879
			ret = memcg_update_kmem_limit(css, val);
4880 4881
		else
			return -EINVAL;
4882
		break;
4883 4884 4885 4886 4887 4888 4889 4890 4891 4892 4893 4894 4895 4896
	case RES_SOFT_LIMIT:
		ret = res_counter_memparse_write_strategy(buffer, &val);
		if (ret)
			break;
		/*
		 * For memsw, soft limits are hard to implement in terms
		 * of semantics, for now, we support soft limits for
		 * control without swap
		 */
		if (type == _MEM)
			ret = res_counter_set_soft_limit(&memcg->res, val);
		else
			ret = -EINVAL;
		break;
	default:
		ret = -EINVAL; /* should be BUG() ? */
		break;
	}
	return ret;
4902 4903
}
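
/*
 * Illustrative usage (sketch, assuming the standard memcg control files):
 *
 *	echo 256M > /sys/fs/cgroup/memory/<group>/memory.limit_in_bytes
 *	echo 300M > /sys/fs/cgroup/memory/<group>/memory.memsw.limit_in_bytes
 *
 * Both writes are parsed by res_counter_memparse_write_strategy() and then
 * applied through mem_cgroup_resize_limit() or
 * mem_cgroup_resize_memsw_limit() above.
 */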

static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
		unsigned long long *mem_limit, unsigned long long *memsw_limit)
{
	unsigned long long min_limit, min_memsw_limit, tmp;

	min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
	min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
	if (!memcg->use_hierarchy)
		goto out;

4914 4915
	while (css_parent(&memcg->css)) {
		memcg = mem_cgroup_from_css(css_parent(&memcg->css));
4916 4917 4918 4919 4920 4921 4922 4923 4924 4925 4926 4927
		if (!memcg->use_hierarchy)
			break;
		tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
		min_limit = min(min_limit, tmp);
		tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
		min_memsw_limit = min(min_memsw_limit, tmp);
	}
out:
	*mem_limit = min_limit;
	*memsw_limit = min_memsw_limit;
}

4928
static int mem_cgroup_reset(struct cgroup_subsys_state *css, unsigned int event)
4929
{
4930
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4931 4932
	int name;
	enum res_type type;
4933

4934 4935
	type = MEMFILE_TYPE(event);
	name = MEMFILE_ATTR(event);
4936

4937
	switch (name) {
4938
	case RES_MAX_USAGE:
4939
		if (type == _MEM)
4940
			res_counter_reset_max(&memcg->res);
4941
		else if (type == _MEMSWAP)
4942
			res_counter_reset_max(&memcg->memsw);
4943 4944 4945 4946
		else if (type == _KMEM)
			res_counter_reset_max(&memcg->kmem);
		else
			return -EINVAL;
4947 4948
		break;
	case RES_FAILCNT:
4949
		if (type == _MEM)
4950
			res_counter_reset_failcnt(&memcg->res);
4951
		else if (type == _MEMSWAP)
4952
			res_counter_reset_failcnt(&memcg->memsw);
4953 4954 4955 4956
		else if (type == _KMEM)
			res_counter_reset_failcnt(&memcg->kmem);
		else
			return -EINVAL;
4957 4958
		break;
	}
4959

4960
	return 0;
4961 4962
}

4963
static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
4964 4965
					struct cftype *cft)
{
4966
	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
4967 4968
}

4969
#ifdef CONFIG_MMU
4970
static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
4971 4972
					struct cftype *cft, u64 val)
{
4973
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4974 4975 4976

	if (val >= (1 << NR_MOVE_TYPE))
		return -EINVAL;
4977

4978
	/*
4979 4980 4981 4982
	 * No kind of locking is needed in here, because ->can_attach() will
	 * check this value once in the beginning of the process, and then carry
	 * on with stale data. This means that changes to this value will only
	 * affect task migrations starting after the change.
4983
	 */
4984
	memcg->move_charge_at_immigrate = val;
4985 4986
	return 0;
}
4987
#else
4988
static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
					struct cftype *cft, u64 val)
{
	return -ENOSYS;
}
#endif
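
/*
 * Illustrative usage (sketch, assuming the standard memcg control files):
 * move_charge_at_immigrate is a bitmask; bit 0 is commonly used for
 * anonymous pages and bit 1 for file pages, so
 *
 *	echo 3 > /sys/fs/cgroup/memory/<group>/memory.move_charge_at_immigrate
 *
 * asks that both kinds of charges follow a task when it is moved into
 * the group.
 */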

4995
#ifdef CONFIG_NUMA
4996 4997
static int memcg_numa_stat_show(struct cgroup_subsys_state *css,
				struct cftype *cft, struct seq_file *m)
4998 4999 5000 5001
{
	int nid;
	unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
	unsigned long node_nr;
5002
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5003

5004
	total_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL);
5005
	seq_printf(m, "total=%lu", total_nr);
5006
	for_each_node_state(nid, N_MEMORY) {
5007
		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL);
5008 5009 5010 5011
		seq_printf(m, " N%d=%lu", nid, node_nr);
	}
	seq_putc(m, '\n');

5012
	file_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_FILE);
5013
	seq_printf(m, "file=%lu", file_nr);
5014
	for_each_node_state(nid, N_MEMORY) {
5015
		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
5016
				LRU_ALL_FILE);
5017 5018 5019 5020
		seq_printf(m, " N%d=%lu", nid, node_nr);
	}
	seq_putc(m, '\n');

5021
	anon_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_ANON);
5022
	seq_printf(m, "anon=%lu", anon_nr);
5023
	for_each_node_state(nid, N_MEMORY) {
5024
		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
5025
				LRU_ALL_ANON);
5026 5027 5028 5029
		seq_printf(m, " N%d=%lu", nid, node_nr);
	}
	seq_putc(m, '\n');

5030
	unevictable_nr = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_UNEVICTABLE));
5031
	seq_printf(m, "unevictable=%lu", unevictable_nr);
5032
	for_each_node_state(nid, N_MEMORY) {
5033
		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
5034
				BIT(LRU_UNEVICTABLE));
		seq_printf(m, " N%d=%lu", nid, node_nr);
	}
	seq_putc(m, '\n');
	return 0;
}
#endif /* CONFIG_NUMA */
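
/*
 * Illustrative output (editorial sketch, numbers made up): memory.numa_stat
 * as produced by the function above looks roughly like
 *
 *	total=1042 N0=800 N1=242
 *	file=600 N0=500 N1=100
 *	anon=430 N0=292 N1=138
 *	unevictable=12 N0=8 N1=4
 *
 * with one line per statistic and one N<nid>= field per node that has
 * memory.
 */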

static inline void mem_cgroup_lru_names_not_uptodate(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
}

5047
static int memcg_stat_show(struct cgroup_subsys_state *css, struct cftype *cft,
5048
				 struct seq_file *m)
5049
{
5050
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5051 5052
	struct mem_cgroup *mi;
	unsigned int i;
5053

5054
	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
5055
		if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
5056
			continue;
5057 5058
		seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i],
			   mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
5059
	}
5060

5061 5062 5063 5064 5065 5066 5067 5068
	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
		seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
			   mem_cgroup_read_events(memcg, i));

	for (i = 0; i < NR_LRU_LISTS; i++)
		seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
			   mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);

5069
	/* Hierarchical information */
5070 5071
	{
		unsigned long long limit, memsw_limit;
5072
		memcg_get_hierarchical_limit(memcg, &limit, &memsw_limit);
5073
		seq_printf(m, "hierarchical_memory_limit %llu\n", limit);
5074
		if (do_swap_account)
5075 5076
			seq_printf(m, "hierarchical_memsw_limit %llu\n",
				   memsw_limit);
5077
	}
5078

5079 5080 5081
	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
		long long val = 0;

5082
		if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
5083
			continue;
		for_each_mem_cgroup_tree(mi, memcg)
			val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
		seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val);
	}

	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
		unsigned long long val = 0;

		for_each_mem_cgroup_tree(mi, memcg)
			val += mem_cgroup_read_events(mi, i);
		seq_printf(m, "total_%s %llu\n",
			   mem_cgroup_events_names[i], val);
	}

	for (i = 0; i < NR_LRU_LISTS; i++) {
		unsigned long long val = 0;

		for_each_mem_cgroup_tree(mi, memcg)
			val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
		seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
5104
	}
5105

5106 5107 5108 5109
#ifdef CONFIG_DEBUG_VM
	{
		int nid, zid;
		struct mem_cgroup_per_zone *mz;
5110
		struct zone_reclaim_stat *rstat;
5111 5112 5113 5114 5115
		unsigned long recent_rotated[2] = {0, 0};
		unsigned long recent_scanned[2] = {0, 0};

		for_each_online_node(nid)
			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
5116
				mz = mem_cgroup_zoneinfo(memcg, nid, zid);
5117
				rstat = &mz->lruvec.reclaim_stat;
5118

5119 5120 5121 5122
				recent_rotated[0] += rstat->recent_rotated[0];
				recent_rotated[1] += rstat->recent_rotated[1];
				recent_scanned[0] += rstat->recent_scanned[0];
				recent_scanned[1] += rstat->recent_scanned[1];
			}
		seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
		seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
		seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
		seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
	}
#endif

	return 0;
}

static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	return mem_cgroup_swappiness(memcg);
}

static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(&memcg->css));

	if (val > 100 || !parent)
		return -EINVAL;

	mutex_lock(&memcg_create_mutex);

	/* Under a hierarchy, only a root with no children may set this value */
	if ((parent->use_hierarchy) || memcg_has_children(memcg)) {
		mutex_unlock(&memcg_create_mutex);
		return -EINVAL;
	}

	memcg->swappiness = val;

	mutex_unlock(&memcg_create_mutex);

	return 0;
}
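/*
 * Usage sketch (the mount point is an assumption of the example):
 * swappiness is set through the per-group "memory.swappiness" file,
 *
 *	echo 60 > /sys/fs/cgroup/memory/mygroup/memory.swappiness
 *
 * Per the checks above, the write fails with -EINVAL for values above
 * 100 and for any group that uses the hierarchy or already has
 * children.
 */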

static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
{
	struct mem_cgroup_threshold_ary *t;
	u64 usage;
	int i;

	rcu_read_lock();
	if (!swap)
		t = rcu_dereference(memcg->thresholds.primary);
	else
		t = rcu_dereference(memcg->memsw_thresholds.primary);

	if (!t)
		goto unlock;

	usage = mem_cgroup_usage(memcg, swap);

	/*
	 * current_threshold points to the threshold just below or equal to
	 * usage. If that does not hold, a threshold was crossed after the
	 * last call of __mem_cgroup_threshold().
	 */
	i = t->current_threshold;

	/*
	 * Iterate backward over the array of thresholds starting from
	 * current_threshold and check if a threshold is crossed.
	 * If none of the thresholds below usage is crossed, we read
	 * only one element of the array here.
	 */
	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
		eventfd_signal(t->entries[i].eventfd, 1);

	/* i = current_threshold + 1 */
	i++;

	/*
	 * Iterate forward over the array of thresholds starting from
	 * current_threshold+1 and check if a threshold is crossed.
	 * If none of the thresholds above usage is crossed, we read
	 * only one element of the array here.
	 */
	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
		eventfd_signal(t->entries[i].eventfd, 1);

	/* Update current_threshold */
	t->current_threshold = i - 1;
unlock:
	rcu_read_unlock();
}
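/*
 * Worked example (illustrative only): assume thresholds of 4M, 8M and
 * 16M with current_threshold at the 4M slot. If usage has grown to 12M,
 * the backward loop does nothing (4M > 12M is false), the forward loop
 * signals the 8M eventfd (8M <= 12M) and stops at 16M, and
 * current_threshold ends up at the 8M slot. If usage later falls to 6M,
 * the backward loop signals the 8M entry (8M > 6M) and
 * current_threshold moves back to the 4M slot.
 */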

static void mem_cgroup_threshold(struct mem_cgroup *memcg)
{
	while (memcg) {
		__mem_cgroup_threshold(memcg, false);
		if (do_swap_account)
			__mem_cgroup_threshold(memcg, true);

		memcg = parent_mem_cgroup(memcg);
	}
}

static int compare_thresholds(const void *a, const void *b)
{
	const struct mem_cgroup_threshold *_a = a;
	const struct mem_cgroup_threshold *_b = b;

	if (_a->threshold > _b->threshold)
		return 1;

	if (_a->threshold < _b->threshold)
		return -1;

	return 0;
}

static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
{
	struct mem_cgroup_eventfd_list *ev;

	list_for_each_entry(ev, &memcg->oom_notify, list)
		eventfd_signal(ev->eventfd, 1);
	return 0;
}

static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg)
		mem_cgroup_oom_notify_cb(iter);
}

static int mem_cgroup_usage_register_event(struct cgroup_subsys_state *css,
	struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct mem_cgroup_thresholds *thresholds;
	struct mem_cgroup_threshold_ary *new;
	enum res_type type = MEMFILE_TYPE(cft->private);
	u64 threshold, usage;
	int i, size, ret;

	ret = res_counter_memparse_write_strategy(args, &threshold);
	if (ret)
		return ret;

	mutex_lock(&memcg->thresholds_lock);

	if (type == _MEM)
		thresholds = &memcg->thresholds;
	else if (type == _MEMSWAP)
		thresholds = &memcg->memsw_thresholds;
	else
		BUG();

	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);

	/* Check if a threshold crossed before adding a new one */
	if (thresholds->primary)
		__mem_cgroup_threshold(memcg, type == _MEMSWAP);

	size = thresholds->primary ? thresholds->primary->size + 1 : 1;

	/* Allocate memory for the new array of thresholds */
	new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
			GFP_KERNEL);
	if (!new) {
		ret = -ENOMEM;
		goto unlock;
	}
	new->size = size;

	/* Copy thresholds (if any) to the new array */
	if (thresholds->primary) {
		memcpy(new->entries, thresholds->primary->entries, (size - 1) *
				sizeof(struct mem_cgroup_threshold));
	}

	/* Add the new threshold */
	new->entries[size - 1].eventfd = eventfd;
	new->entries[size - 1].threshold = threshold;

	/* Sort thresholds. Registering a new threshold isn't time-critical */
	sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
			compare_thresholds, NULL);

	/* Find the current threshold */
	new->current_threshold = -1;
	for (i = 0; i < size; i++) {
		if (new->entries[i].threshold <= usage) {
			/*
			 * new->current_threshold will not be used until
			 * rcu_assign_pointer(), so it's safe to increment
			 * it here.
			 */
			++new->current_threshold;
		} else
			break;
	}

	/* Free old spare buffer and save old primary buffer as spare */
	kfree(thresholds->spare);
	thresholds->spare = thresholds->primary;

	rcu_assign_pointer(thresholds->primary, new);

	/* To be sure that nobody uses thresholds */
	synchronize_rcu();

unlock:
	mutex_unlock(&memcg->thresholds_lock);

	return ret;
}
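/*
 * Userspace side, as a hedged sketch (not part of this file): a
 * threshold is armed by writing "<event_fd> <target_fd> <args>" to the
 * group's cgroup.event_control file, where <target_fd> is an open fd of
 * memory.usage_in_bytes (or memory.memsw.usage_in_bytes) and <args> is
 * the threshold parsed by res_counter_memparse_write_strategy() above,
 * e.g. "50M". Paths are relative to the group's directory and are an
 * assumption of the example:
 *
 *	int efd = eventfd(0, 0);
 *	int ufd = open("memory.usage_in_bytes", O_RDONLY);
 *	int cfd = open("cgroup.event_control", O_WRONLY);
 *	char buf[64];
 *	uint64_t ticks;
 *
 *	snprintf(buf, sizeof(buf), "%d %d 50M", efd, ufd);
 *	write(cfd, buf, strlen(buf));
 *	read(efd, &ticks, sizeof(ticks));  blocks until a threshold fires
 */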

static void mem_cgroup_usage_unregister_event(struct cgroup_subsys_state *css,
	struct cftype *cft, struct eventfd_ctx *eventfd)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct mem_cgroup_thresholds *thresholds;
	struct mem_cgroup_threshold_ary *new;
	enum res_type type = MEMFILE_TYPE(cft->private);
	u64 usage;
	int i, j, size;

	mutex_lock(&memcg->thresholds_lock);
	if (type == _MEM)
		thresholds = &memcg->thresholds;
	else if (type == _MEMSWAP)
		thresholds = &memcg->memsw_thresholds;
	else
		BUG();

	if (!thresholds->primary)
		goto unlock;

	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);

	/* Check if a threshold crossed before removing */
	__mem_cgroup_threshold(memcg, type == _MEMSWAP);

	/* Calculate the new number of thresholds */
	size = 0;
	for (i = 0; i < thresholds->primary->size; i++) {
		if (thresholds->primary->entries[i].eventfd != eventfd)
			size++;
	}

	new = thresholds->spare;

	/* Set thresholds array to NULL if we don't have thresholds */
	if (!size) {
		kfree(new);
		new = NULL;
		goto swap_buffers;
	}

	new->size = size;

	/* Copy thresholds and find the current threshold */
	new->current_threshold = -1;
	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
		if (thresholds->primary->entries[i].eventfd == eventfd)
			continue;

		new->entries[j] = thresholds->primary->entries[i];
		if (new->entries[j].threshold <= usage) {
			/*
			 * new->current_threshold will not be used
			 * until rcu_assign_pointer(), so it's safe to increment
			 * it here.
			 */
			++new->current_threshold;
		}
		j++;
	}

swap_buffers:
	/* Swap primary and spare array */
	thresholds->spare = thresholds->primary;
	/* If all events are unregistered, free the spare array */
	if (!new) {
		kfree(thresholds->spare);
		thresholds->spare = NULL;
	}

	rcu_assign_pointer(thresholds->primary, new);

	/* To be sure that nobody uses thresholds */
	synchronize_rcu();
unlock:
	mutex_unlock(&memcg->thresholds_lock);
}
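/*
 * Design note: the primary/spare pair used above is a small
 * double-buffer scheme. Unregister must not fail, so it never
 * allocates; it reuses the spare array kept around by the register
 * path, fills it with the surviving entries, and publishes it with
 * rcu_assign_pointer() while readers keep walking the old primary
 * under rcu_read_lock().
 */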

static int mem_cgroup_oom_register_event(struct cgroup_subsys_state *css,
	struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct mem_cgroup_eventfd_list *event;
	enum res_type type = MEMFILE_TYPE(cft->private);

	BUG_ON(type != _OOM_TYPE);
	event = kmalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	spin_lock(&memcg_oom_lock);

	event->eventfd = eventfd;
	list_add(&event->list, &memcg->oom_notify);

	/* already in OOM ? */
	if (atomic_read(&memcg->under_oom))
		eventfd_signal(eventfd, 1);
	spin_unlock(&memcg_oom_lock);

	return 0;
}

static void mem_cgroup_oom_unregister_event(struct cgroup_subsys_state *css,
	struct cftype *cft, struct eventfd_ctx *eventfd)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct mem_cgroup_eventfd_list *ev, *tmp;
	enum res_type type = MEMFILE_TYPE(cft->private);

	BUG_ON(type != _OOM_TYPE);

	spin_lock(&memcg_oom_lock);

	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
		if (ev->eventfd == eventfd) {
			list_del(&ev->list);
			kfree(ev);
		}
	}

	spin_unlock(&memcg_oom_lock);
}

static int mem_cgroup_oom_control_read(struct cgroup_subsys_state *css,
	struct cftype *cft, struct cgroup_map_cb *cb)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	cb->fill(cb, "oom_kill_disable", memcg->oom_kill_disable);

	if (atomic_read(&memcg->under_oom))
		cb->fill(cb, "under_oom", 1);
	else
		cb->fill(cb, "under_oom", 0);
	return 0;
}

static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
	struct cftype *cft, u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(&memcg->css));

	/* cannot set to root cgroup and only 0 and 1 are allowed */
	if (!parent || !((val == 0) || (val == 1)))
		return -EINVAL;

	mutex_lock(&memcg_create_mutex);
	/* oom-kill-disable is a flag for the subhierarchy. */
	if ((parent->use_hierarchy) || memcg_has_children(memcg)) {
		mutex_unlock(&memcg_create_mutex);
		return -EINVAL;
	}
	memcg->oom_kill_disable = val;
	if (!val)
		memcg_oom_recover(memcg);
	mutex_unlock(&memcg_create_mutex);
	return 0;
}
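/*
 * Usage sketch (paths assumed, not taken from this file): the OOM
 * killer for a group is disabled with
 *
 *	echo 1 > /sys/fs/cgroup/memory/mygroup/memory.oom_control
 *
 * after which tasks that hit the limit wait in an OOM waitqueue instead
 * of being killed; reading the same file reports "oom_kill_disable" and
 * "under_oom" as filled in by mem_cgroup_oom_control_read() above.
 */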

#ifdef CONFIG_MEMCG_KMEM
static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
	int ret;

	memcg->kmemcg_id = -1;
	ret = memcg_propagate_kmem(memcg);
	if (ret)
		return ret;

	return mem_cgroup_sockets_init(memcg, ss);
}

static void memcg_destroy_kmem(struct mem_cgroup *memcg)
{
	mem_cgroup_sockets_destroy(memcg);
}

static void kmem_cgroup_css_offline(struct mem_cgroup *memcg)
{
	if (!memcg_kmem_is_active(memcg))
		return;

	/*
	 * kmem charges can outlive the cgroup. In the case of slab
	 * pages, for instance, a page may contain objects from various
	 * processes. As we prevent from taking a reference for every
	 * such allocation we have to be careful when doing uncharge
	 * (see memcg_uncharge_kmem) and here during offlining.
	 *
	 * The idea is that only the _last_ uncharge which sees
	 * the dead memcg will drop the last reference. An additional
	 * reference is taken here before the group is marked dead
	 * which is then paired with css_put during uncharge resp. here.
	 *
	 * Although this might sound strange as this path is called from
	 * css_offline() when the reference might have dropped down to 0
	 * and shouldn't be incremented anymore (css_tryget would fail)
	 * we do not have other options because of the kmem allocations
	 * lifetime.
	 */
	css_get(&memcg->css);

	memcg_kmem_mark_dead(memcg);

	if (res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0)
		return;

	if (memcg_kmem_test_and_clear_dead(memcg))
		css_put(&memcg->css);
}
#else
static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
	return 0;
}

static void memcg_destroy_kmem(struct mem_cgroup *memcg)
{
}

static void kmem_cgroup_css_offline(struct mem_cgroup *memcg)
{
}
#endif

static struct cftype mem_cgroup_files[] = {
	{
		.name = "usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
		.read = mem_cgroup_read,
		.register_event = mem_cgroup_usage_register_event,
		.unregister_event = mem_cgroup_usage_unregister_event,
	},
	{
		.name = "max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
		.trigger = mem_cgroup_reset,
		.read = mem_cgroup_read,
	},
	{
		.name = "limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
		.write_string = mem_cgroup_write,
		.read = mem_cgroup_read,
	},
	{
		.name = "soft_limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
		.write_string = mem_cgroup_write,
		.read = mem_cgroup_read,
	},
	{
		.name = "failcnt",
		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
		.trigger = mem_cgroup_reset,
		.read = mem_cgroup_read,
	},
	{
		.name = "stat",
		.read_seq_string = memcg_stat_show,
	},
	{
		.name = "force_empty",
		.trigger = mem_cgroup_force_empty_write,
	},
	{
		.name = "use_hierarchy",
		.flags = CFTYPE_INSANE,
		.write_u64 = mem_cgroup_hierarchy_write,
		.read_u64 = mem_cgroup_hierarchy_read,
	},
	{
		.name = "swappiness",
		.read_u64 = mem_cgroup_swappiness_read,
		.write_u64 = mem_cgroup_swappiness_write,
	},
	{
		.name = "move_charge_at_immigrate",
		.read_u64 = mem_cgroup_move_charge_read,
		.write_u64 = mem_cgroup_move_charge_write,
	},
	{
		.name = "oom_control",
		.read_map = mem_cgroup_oom_control_read,
		.write_u64 = mem_cgroup_oom_control_write,
		.register_event = mem_cgroup_oom_register_event,
		.unregister_event = mem_cgroup_oom_unregister_event,
		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
	},
	{
		.name = "pressure_level",
		.register_event = vmpressure_register_event,
		.unregister_event = vmpressure_unregister_event,
	},
#ifdef CONFIG_NUMA
	{
		.name = "numa_stat",
		.read_seq_string = memcg_numa_stat_show,
	},
#endif
#ifdef CONFIG_MEMCG_KMEM
	{
		.name = "kmem.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
		.write_string = mem_cgroup_write,
		.read = mem_cgroup_read,
	},
	{
		.name = "kmem.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
		.read = mem_cgroup_read,
	},
	{
		.name = "kmem.failcnt",
		.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
		.trigger = mem_cgroup_reset,
		.read = mem_cgroup_read,
	},
	{
		.name = "kmem.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
		.trigger = mem_cgroup_reset,
		.read = mem_cgroup_read,
	},
#ifdef CONFIG_SLABINFO
	{
		.name = "kmem.slabinfo",
		.read_seq_string = mem_cgroup_slabinfo_read,
	},
#endif
#endif
	{ },	/* terminate */
};
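/*
 * Each entry above becomes a per-group control file; cgroup core
 * prefixes the names with the subsystem name, so e.g. "limit_in_bytes"
 * shows up as memory.limit_in_bytes in every memory cgroup directory.
 */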

#ifdef CONFIG_MEMCG_SWAP
static struct cftype memsw_cgroup_files[] = {
	{
		.name = "memsw.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
		.read = mem_cgroup_read,
		.register_event = mem_cgroup_usage_register_event,
		.unregister_event = mem_cgroup_usage_unregister_event,
	},
	{
		.name = "memsw.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
		.trigger = mem_cgroup_reset,
		.read = mem_cgroup_read,
	},
	{
		.name = "memsw.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
		.write_string = mem_cgroup_write,
		.read = mem_cgroup_read,
	},
	{
		.name = "memsw.failcnt",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
		.trigger = mem_cgroup_reset,
		.read = mem_cgroup_read,
	},
	{ },	/* terminate */
};
#endif
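/*
 * Unlike mem_cgroup_files, the memsw table above is not wired up via
 * base_cftypes; it is added at boot by memsw_file_init() below, and
 * only when swap accounting is really enabled (see enable_swap_cgroup()
 * at the bottom of this file).
 */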
static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
{
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup_per_zone *mz;
	int zone, tmp = node;
	/*
	 * This routine is called against possible nodes.
	 * But it is a BUG to call kmalloc() against an offline node.
	 *
	 * TODO: this routine can waste much memory for nodes which will
	 *       never be onlined. It's better to use a memory hotplug
	 *       callback function.
	 */
	if (!node_state(node, N_NORMAL_MEMORY))
		tmp = -1;
	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
	if (!pn)
		return 1;

	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
		mz = &pn->zoneinfo[zone];
		lruvec_init(&mz->lruvec);
		mz->memcg = memcg;
	}
	memcg->nodeinfo[node] = pn;
	return 0;
}

static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
{
	kfree(memcg->nodeinfo[node]);
}

static struct mem_cgroup *mem_cgroup_alloc(void)
{
	struct mem_cgroup *memcg;
	size_t size = memcg_size();

	/* Can be very big if nr_node_ids is very big */
	if (size < PAGE_SIZE)
		memcg = kzalloc(size, GFP_KERNEL);
	else
		memcg = vzalloc(size);

	if (!memcg)
		return NULL;

	memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
	if (!memcg->stat)
		goto out_free;
	spin_lock_init(&memcg->pcp_counter_lock);
	return memcg;

out_free:
	if (size < PAGE_SIZE)
		kfree(memcg);
	else
		vfree(memcg);
	return NULL;
}

/*
 * At destroying mem_cgroup, references from swap_cgroup can remain.
 * (scanning all at force_empty is too costly...)
 *
 * Instead of clearing all references at force_empty, we remember
 * the number of references from swap_cgroup and free mem_cgroup when
 * it goes down to 0.
 *
 * Removal of the cgroup itself succeeds regardless of refs from swap.
 */

static void __mem_cgroup_free(struct mem_cgroup *memcg)
{
	int node;
	size_t size = memcg_size();

	free_css_id(&mem_cgroup_subsys, &memcg->css);

	for_each_node(node)
		free_mem_cgroup_per_zone_info(memcg, node);

	free_percpu(memcg->stat);

	/*
	 * We need to make sure that (at least for now), the jump label
	 * destruction code runs outside of the cgroup lock. This is because
	 * get_online_cpus(), which is called from the static_branch update,
	 * can't be called inside the cgroup_lock. cpusets are the ones
	 * enforcing this dependency, so if they ever change, we might as well.
	 *
	 * schedule_work() will guarantee this happens. Be careful if you need
	 * to move this code around, and make sure it is outside
	 * the cgroup_lock.
	 */
	disarm_static_keys(memcg);
	if (size < PAGE_SIZE)
		kfree(memcg);
	else
		vfree(memcg);
}

/*
 * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled.
 */
struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg->res.parent)
		return NULL;
	return mem_cgroup_from_res_counter(memcg->res.parent, res);
}
EXPORT_SYMBOL(parent_mem_cgroup);

static struct cgroup_subsys_state * __ref
mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct mem_cgroup *memcg;
	long error = -ENOMEM;
	int node;

	memcg = mem_cgroup_alloc();
	if (!memcg)
		return ERR_PTR(error);

	for_each_node(node)
		if (alloc_mem_cgroup_per_zone_info(memcg, node))
			goto free_out;

	/* root ? */
	if (parent_css == NULL) {
		root_mem_cgroup = memcg;
		res_counter_init(&memcg->res, NULL);
		res_counter_init(&memcg->memsw, NULL);
		res_counter_init(&memcg->kmem, NULL);
	}

	memcg->last_scanned_node = MAX_NUMNODES;
	INIT_LIST_HEAD(&memcg->oom_notify);
	memcg->move_charge_at_immigrate = 0;
	mutex_init(&memcg->thresholds_lock);
	spin_lock_init(&memcg->move_lock);
	vmpressure_init(&memcg->vmpressure);

	return &memcg->css;

free_out:
	__mem_cgroup_free(memcg);
	return ERR_PTR(error);
}

static int
mem_cgroup_css_online(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(css));
	int error = 0;

	if (!parent)
		return 0;

	mutex_lock(&memcg_create_mutex);

	memcg->use_hierarchy = parent->use_hierarchy;
	memcg->oom_kill_disable = parent->oom_kill_disable;
	memcg->swappiness = mem_cgroup_swappiness(parent);

	if (parent->use_hierarchy) {
		res_counter_init(&memcg->res, &parent->res);
		res_counter_init(&memcg->memsw, &parent->memsw);
		res_counter_init(&memcg->kmem, &parent->kmem);

		/*
		 * No need to take a reference to the parent because cgroup
		 * core guarantees its existence.
		 */
	} else {
		res_counter_init(&memcg->res, NULL);
		res_counter_init(&memcg->memsw, NULL);
		res_counter_init(&memcg->kmem, NULL);
		/*
		 * A deeper hierarchy with use_hierarchy == false doesn't make
		 * much sense, so let the cgroup subsystem know about this
		 * unfortunate state in our controller.
		 */
		if (parent != root_mem_cgroup)
			mem_cgroup_subsys.broken_hierarchy = true;
	}

	error = memcg_init_kmem(memcg, &mem_cgroup_subsys);
	mutex_unlock(&memcg_create_mutex);
	return error;
}

/*
 * Announce to all parents that a group from their hierarchy is gone.
 */
static void mem_cgroup_invalidate_reclaim_iterators(struct mem_cgroup *memcg)
{
	struct mem_cgroup *parent = memcg;

	while ((parent = parent_mem_cgroup(parent)))
		mem_cgroup_iter_invalidate(parent);

	/*
	 * If the root memcg is not hierarchical we have to check it
	 * explicitly.
	 */
	if (!root_mem_cgroup->use_hierarchy)
		mem_cgroup_iter_invalidate(root_mem_cgroup);
}

static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	kmem_cgroup_css_offline(memcg);

	mem_cgroup_invalidate_reclaim_iterators(memcg);
	mem_cgroup_reparent_charges(memcg);
	mem_cgroup_destroy_all_caches(memcg);
	vmpressure_cleanup(&memcg->vmpressure);
}

static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	memcg_destroy_kmem(memcg);
	__mem_cgroup_free(memcg);
}

#ifdef CONFIG_MMU
/* Handlers for move charge at task migration. */
#define PRECHARGE_COUNT_AT_ONCE	256
static int mem_cgroup_do_precharge(unsigned long count)
{
	int ret = 0;
	int batch_count = PRECHARGE_COUNT_AT_ONCE;
	struct mem_cgroup *memcg = mc.to;

	if (mem_cgroup_is_root(memcg)) {
		mc.precharge += count;
		/* we don't need css_get for root */
		return ret;
	}
	/* try to charge at once */
	if (count > 1) {
		struct res_counter *dummy;
		/*
		 * "memcg" cannot be under rmdir() because we've already checked
		 * by cgroup_lock_live_cgroup() that it is not removed and we
		 * are still under the same cgroup_mutex. So we can postpone
		 * css_get().
		 */
		if (res_counter_charge(&memcg->res, PAGE_SIZE * count, &dummy))
			goto one_by_one;
		if (do_swap_account && res_counter_charge(&memcg->memsw,
						PAGE_SIZE * count, &dummy)) {
			res_counter_uncharge(&memcg->res, PAGE_SIZE * count);
			goto one_by_one;
		}
		mc.precharge += count;
		return ret;
	}
one_by_one:
	/* fall back to one by one charge */
	while (count--) {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		if (!batch_count--) {
			batch_count = PRECHARGE_COUNT_AT_ONCE;
			cond_resched();
		}
		ret = __mem_cgroup_try_charge(NULL,
					GFP_KERNEL, 1, &memcg, false);
		if (ret)
			/* mem_cgroup_clear_mc() will do uncharge later */
			return ret;
		mc.precharge++;
	}
	return ret;
}

/**
 * get_mctgt_type - get target type of moving charge
 * @vma: the vma the pte to be checked belongs to
 * @addr: the address corresponding to the pte to be checked
 * @ptent: the pte to be checked
 * @target: the pointer the target page or swap entry will be stored in
 *	(can be NULL)
 *
 * Returns
 *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
 *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
 *     move charge. if @target is not NULL, the page is stored in target->page
 *     with an extra refcount taken (callers should handle it).
 *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
 *     target for charge migration. if @target is not NULL, the entry is stored
 *     in target->ent.
 *
 * Called with pte lock held.
 */
union mc_target {
	struct page	*page;
	swp_entry_t	ent;
};

enum mc_target_type {
	MC_TARGET_NONE = 0,
	MC_TARGET_PAGE,
	MC_TARGET_SWAP,
};

static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
						unsigned long addr, pte_t ptent)
{
	struct page *page = vm_normal_page(vma, addr, ptent);

	if (!page || !page_mapped(page))
		return NULL;
	if (PageAnon(page)) {
		/* we don't move shared anon */
		if (!move_anon())
			return NULL;
	} else if (!move_file())
		/* we ignore mapcount for file pages */
		return NULL;
	if (!get_page_unless_zero(page))
		return NULL;

	return page;
}

#ifdef CONFIG_SWAP
static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	struct page *page = NULL;
	swp_entry_t ent = pte_to_swp_entry(ptent);

	if (!move_anon() || non_swap_entry(ent))
		return NULL;
	/*
	 * Because lookup_swap_cache() updates some statistics counter,
	 * we call find_get_page() with swapper_space directly.
	 */
	page = find_get_page(swap_address_space(ent), ent.val);
	if (do_swap_account)
		entry->val = ent.val;

	return page;
}
#else
static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	return NULL;
}
#endif

static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	struct page *page = NULL;
	struct address_space *mapping;
	pgoff_t pgoff;

	if (!vma->vm_file) /* anonymous vma */
		return NULL;
	if (!move_file())
		return NULL;

	mapping = vma->vm_file->f_mapping;
	if (pte_none(ptent))
		pgoff = linear_page_index(vma, addr);
	else /* pte_file(ptent) is true */
		pgoff = pte_to_pgoff(ptent);

	/* page is moved even if it's not RSS of this task(page-faulted). */
	page = find_get_page(mapping, pgoff);

#ifdef CONFIG_SWAP
	/* shmem/tmpfs may report page out on swap: account for that too. */
	if (radix_tree_exceptional_entry(page)) {
		swp_entry_t swap = radix_to_swp_entry(page);
		if (do_swap_account)
			*entry = swap;
		page = find_get_page(swap_address_space(swap), swap.val);
	}
#endif
	return page;
}

static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
		unsigned long addr, pte_t ptent, union mc_target *target)
{
	struct page *page = NULL;
	struct page_cgroup *pc;
	enum mc_target_type ret = MC_TARGET_NONE;
	swp_entry_t ent = { .val = 0 };

	if (pte_present(ptent))
		page = mc_handle_present_pte(vma, addr, ptent);
	else if (is_swap_pte(ptent))
		page = mc_handle_swap_pte(vma, addr, ptent, &ent);
	else if (pte_none(ptent) || pte_file(ptent))
		page = mc_handle_file_pte(vma, addr, ptent, &ent);

	if (!page && !ent.val)
		return ret;
	if (page) {
		pc = lookup_page_cgroup(page);
		/*
		 * Do only a loose check w/o taking the page_cgroup lock.
		 * mem_cgroup_move_account() checks the pc is valid or not under
		 * the lock.
		 */
		if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
			ret = MC_TARGET_PAGE;
			if (target)
				target->page = page;
		}
		if (!ret || !target)
			put_page(page);
	}
	/* There is a swap entry and a page doesn't exist or isn't charged */
	if (ent.val && !ret &&
			css_id(&mc.from->css) == lookup_swap_cgroup_id(ent)) {
		ret = MC_TARGET_SWAP;
		if (target)
			target->ent = ent;
	}
	return ret;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * We don't consider swapping or file mapped pages because THP does not
 * support them for now.
 * Caller should make sure that pmd_trans_huge(pmd) is true.
 */
static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
		unsigned long addr, pmd_t pmd, union mc_target *target)
{
	struct page *page = NULL;
	struct page_cgroup *pc;
	enum mc_target_type ret = MC_TARGET_NONE;

	page = pmd_page(pmd);
	VM_BUG_ON(!page || !PageHead(page));
	if (!move_anon())
		return ret;
	pc = lookup_page_cgroup(page);
	if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
		ret = MC_TARGET_PAGE;
		if (target) {
			get_page(page);
			target->page = page;
		}
	}
	return ret;
}
#else
static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
		unsigned long addr, pmd_t pmd, union mc_target *target)
{
	return MC_TARGET_NONE;
}
#endif

static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
					unsigned long addr, unsigned long end,
					struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_trans_huge_lock(pmd, vma) == 1) {
		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
			mc.precharge += HPAGE_PMD_NR;
		spin_unlock(&vma->vm_mm->page_table_lock);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		if (get_mctgt_type(vma, addr, *pte, NULL))
			mc.precharge++;	/* increment precharge temporarily */
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	return 0;
}

static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
{
	unsigned long precharge;
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		struct mm_walk mem_cgroup_count_precharge_walk = {
			.pmd_entry = mem_cgroup_count_precharge_pte_range,
			.mm = mm,
			.private = vma,
		};
		if (is_vm_hugetlb_page(vma))
			continue;
		walk_page_range(vma->vm_start, vma->vm_end,
					&mem_cgroup_count_precharge_walk);
	}
	up_read(&mm->mmap_sem);

	precharge = mc.precharge;
	mc.precharge = 0;

	return precharge;
}

static int mem_cgroup_precharge_mc(struct mm_struct *mm)
{
	unsigned long precharge = mem_cgroup_count_precharge(mm);

	VM_BUG_ON(mc.moving_task);
	mc.moving_task = current;
	return mem_cgroup_do_precharge(precharge);
}

/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
static void __mem_cgroup_clear_mc(void)
{
	struct mem_cgroup *from = mc.from;
	struct mem_cgroup *to = mc.to;
	int i;

	/* we must uncharge all the leftover precharges from mc.to */
	if (mc.precharge) {
		__mem_cgroup_cancel_charge(mc.to, mc.precharge);
		mc.precharge = 0;
	}
	/*
	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
	 * we must uncharge here.
	 */
	if (mc.moved_charge) {
		__mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
		mc.moved_charge = 0;
	}
	/* we must fix up refcnts and charges */
	if (mc.moved_swap) {
		/* uncharge swap account from the old cgroup */
		if (!mem_cgroup_is_root(mc.from))
			res_counter_uncharge(&mc.from->memsw,
						PAGE_SIZE * mc.moved_swap);

		for (i = 0; i < mc.moved_swap; i++)
			css_put(&mc.from->css);

		if (!mem_cgroup_is_root(mc.to)) {
			/*
			 * we charged both to->res and to->memsw, so we should
			 * uncharge to->res.
			 */
			res_counter_uncharge(&mc.to->res,
						PAGE_SIZE * mc.moved_swap);
		}
		/* we've already done css_get(mc.to) */
		mc.moved_swap = 0;
	}
	memcg_oom_recover(from);
	memcg_oom_recover(to);
	wake_up_all(&mc.waitq);
}

static void mem_cgroup_clear_mc(void)
{
	struct mem_cgroup *from = mc.from;

	/*
	 * we must clear moving_task before waking up waiters at the end of
	 * task migration.
	 */
	mc.moving_task = NULL;
	__mem_cgroup_clear_mc();
	spin_lock(&mc.lock);
	mc.from = NULL;
	mc.to = NULL;
	spin_unlock(&mc.lock);
	mem_cgroup_end_move(from);
}

static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
				 struct cgroup_taskset *tset)
{
	struct task_struct *p = cgroup_taskset_first(tset);
	int ret = 0;
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	unsigned long move_charge_at_immigrate;

	/*
	 * We are now committed to this value, whatever it is. Changes in this
	 * tunable will only affect upcoming migrations, not the current one.
	 * So we need to save it, and keep it going.
	 */
	move_charge_at_immigrate = memcg->move_charge_at_immigrate;
	if (move_charge_at_immigrate) {
		struct mm_struct *mm;
		struct mem_cgroup *from = mem_cgroup_from_task(p);

		VM_BUG_ON(from == memcg);

		mm = get_task_mm(p);
		if (!mm)
			return 0;
		/* We move charges only when we move an owner of the mm */
		if (mm->owner == p) {
			VM_BUG_ON(mc.from);
			VM_BUG_ON(mc.to);
			VM_BUG_ON(mc.precharge);
			VM_BUG_ON(mc.moved_charge);
			VM_BUG_ON(mc.moved_swap);
			mem_cgroup_start_move(from);
			spin_lock(&mc.lock);
			mc.from = from;
			mc.to = memcg;
			mc.immigrate_flags = move_charge_at_immigrate;
			spin_unlock(&mc.lock);
			/* We set mc.moving_task later */

			ret = mem_cgroup_precharge_mc(mm);
			if (ret)
				mem_cgroup_clear_mc();
		}
		mmput(mm);
	}
	return ret;
}
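/*
 * For reference, move_charge_at_immigrate is a bitmask written by the
 * admin before moving a task, e.g. (path assumed)
 *
 *	echo 3 > /sys/fs/cgroup/memory/dst/memory.move_charge_at_immigrate
 *
 * where bit 0 selects anonymous pages and bit 1 file pages; the
 * move_anon() and move_file() helpers used by the target-selection
 * code above test exactly these flags.
 */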

static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
				     struct cgroup_taskset *tset)
{
	mem_cgroup_clear_mc();
}

static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mm_walk *walk)
{
	int ret = 0;
	struct vm_area_struct *vma = walk->private;
	pte_t *pte;
	spinlock_t *ptl;
	enum mc_target_type target_type;
	union mc_target target;
	struct page *page;
	struct page_cgroup *pc;

	/*
	 * We don't take compound_lock() here but no race with splitting thp
	 * happens because:
	 *  - if pmd_trans_huge_lock() returns 1, the relevant thp is not
	 *    under splitting, which means there's no concurrent thp split,
	 *  - if another thread runs into split_huge_page() just after we
	 *    entered this if-block, the thread must wait for page table lock
	 *    to be unlocked in __split_huge_page_splitting(), where the main
	 *    part of thp split is not executed yet.
	 */
	if (pmd_trans_huge_lock(pmd, vma) == 1) {
		if (mc.precharge < HPAGE_PMD_NR) {
			spin_unlock(&vma->vm_mm->page_table_lock);
			return 0;
		}
		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
		if (target_type == MC_TARGET_PAGE) {
			page = target.page;
			if (!isolate_lru_page(page)) {
				pc = lookup_page_cgroup(page);
				if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
							pc, mc.from, mc.to)) {
					mc.precharge -= HPAGE_PMD_NR;
					mc.moved_charge += HPAGE_PMD_NR;
				}
				putback_lru_page(page);
			}
			put_page(page);
		}
		spin_unlock(&vma->vm_mm->page_table_lock);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
retry:
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; addr += PAGE_SIZE) {
		pte_t ptent = *(pte++);
		swp_entry_t ent;

		if (!mc.precharge)
			break;

		switch (get_mctgt_type(vma, addr, ptent, &target)) {
		case MC_TARGET_PAGE:
			page = target.page;
			if (isolate_lru_page(page))
				goto put;
			pc = lookup_page_cgroup(page);
			if (!mem_cgroup_move_account(page, 1, pc,
						     mc.from, mc.to)) {
				mc.precharge--;
				/* we uncharge from mc.from later. */
				mc.moved_charge++;
			}
			putback_lru_page(page);
put:			/* get_mctgt_type() gets the page */
			put_page(page);
			break;
		case MC_TARGET_SWAP:
			ent = target.ent;
			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
				mc.precharge--;
				/* we fix up refcnts and charges later. */
				mc.moved_swap++;
			}
			break;
		default:
			break;
		}
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	if (addr != end) {
		/*
		 * We have consumed all precharges we got in can_attach().
		 * We try charge one by one, but don't do any additional
		 * charges to mc.to if we have failed in charge once in attach()
		 * phase.
		 */
		ret = mem_cgroup_do_precharge(1);
		if (!ret)
			goto retry;
	}

	return ret;
}

static void mem_cgroup_move_charge(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	lru_add_drain_all();
retry:
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		/*
		 * Someone who is holding the mmap_sem might be waiting in
		 * waitq. So we cancel all extra charges, wake up all waiters,
		 * and retry. Because we cancel precharges, we might not be able
		 * to move enough charges, but moving charge is a best-effort
		 * feature anyway, so it wouldn't be a big problem.
		 */
		__mem_cgroup_clear_mc();
		cond_resched();
		goto retry;
	}
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		int ret;
		struct mm_walk mem_cgroup_move_charge_walk = {
			.pmd_entry = mem_cgroup_move_charge_pte_range,
			.mm = mm,
			.private = vma,
		};
		if (is_vm_hugetlb_page(vma))
			continue;
		ret = walk_page_range(vma->vm_start, vma->vm_end,
						&mem_cgroup_move_charge_walk);
		if (ret)
			/*
			 * means we have consumed all precharges and failed in
			 * doing additional charge. Just abandon here.
			 */
			break;
	}
	up_read(&mm->mmap_sem);
}

static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
				 struct cgroup_taskset *tset)
{
	struct task_struct *p = cgroup_taskset_first(tset);
	struct mm_struct *mm = get_task_mm(p);

	if (mm) {
		if (mc.to)
			mem_cgroup_move_charge(mm);
		mmput(mm);
	}
	if (mc.to)
		mem_cgroup_clear_mc();
}
#else	/* !CONFIG_MMU */
static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
				 struct cgroup_taskset *tset)
{
	return 0;
}
static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
				     struct cgroup_taskset *tset)
{
}
static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
				 struct cgroup_taskset *tset)
{
}
#endif

/*
 * Cgroup retains root cgroups across [un]mount cycles making it necessary
 * to verify sane_behavior flag on each mount attempt.
 */
static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
{
	/*
	 * use_hierarchy is forced with sane_behavior.  cgroup core
	 * guarantees that @root doesn't have any children, so turning it
	 * on for the root memcg is enough.
	 */
	if (cgroup_sane_behavior(root_css->cgroup))
		mem_cgroup_from_css(root_css)->use_hierarchy = true;
}

struct cgroup_subsys mem_cgroup_subsys = {
	.name = "memory",
	.subsys_id = mem_cgroup_subsys_id,
	.css_alloc = mem_cgroup_css_alloc,
	.css_online = mem_cgroup_css_online,
	.css_offline = mem_cgroup_css_offline,
	.css_free = mem_cgroup_css_free,
	.can_attach = mem_cgroup_can_attach,
	.cancel_attach = mem_cgroup_cancel_attach,
	.attach = mem_cgroup_move_task,
	.bind = mem_cgroup_bind,
	.base_cftypes = mem_cgroup_files,
	.early_init = 0,
	.use_id = 1,
};

#ifdef CONFIG_MEMCG_SWAP
static int __init enable_swap_account(char *s)
{
	if (!strcmp(s, "1"))
		really_do_swap_account = 1;
	else if (!strcmp(s, "0"))
		really_do_swap_account = 0;
	return 1;
}
__setup("swapaccount=", enable_swap_account);
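/*
 * Usage: swap accounting is toggled from the kernel command line, e.g.
 * booting with "swapaccount=0" disables it even when
 * CONFIG_MEMCG_SWAP_ENABLED builds it in, and "swapaccount=1" forces
 * it on.
 */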

static void __init memsw_file_init(void)
{
	WARN_ON(cgroup_add_cftypes(&mem_cgroup_subsys, memsw_cgroup_files));
}

static void __init enable_swap_cgroup(void)
{
	if (!mem_cgroup_disabled() && really_do_swap_account) {
		do_swap_account = 1;
		memsw_file_init();
	}
}

#else
static void __init enable_swap_cgroup(void)
{
}
#endif

/*
 * subsys_initcall() for memory controller.
 *
 * Some parts like hotcpu_notifier() have to be initialized from this context
 * because of lock dependencies (cgroup_lock -> cpu hotplug) but basically
 * everything that doesn't depend on a specific mem_cgroup structure should
 * be initialized from here.
 */
static int __init mem_cgroup_init(void)
{
	hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
	enable_swap_cgroup();
	memcg_stock_init();
	return 0;
}
subsys_initcall(mem_cgroup_init);