/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/page_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include <net/tcp_memcontrol.h>

#include <asm/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys mem_cgroup_subsys __read_mostly;
EXPORT_SYMBOL(mem_cgroup_subsys);

#define MEM_CGROUP_RECLAIM_RETRIES	5
static struct mem_cgroup *root_mem_cgroup __read_mostly;

#ifdef CONFIG_MEMCG_SWAP
/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
int do_swap_account __read_mostly;

/* for remembering the boot option */
#ifdef CONFIG_MEMCG_SWAP_ENABLED
static int really_do_swap_account __initdata = 1;
#else
static int really_do_swap_account __initdata = 0;
#endif

#else
#define do_swap_account		0
#endif


/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,		/* # of pages charged as anon rss */
	MEM_CGROUP_STAT_RSS_HUGE,	/* # of pages charged as anon huge */
	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
	MEM_CGROUP_STAT_SWAP,		/* # of pages, swapped out */
	MEM_CGROUP_STAT_NSTATS,
};

static const char * const mem_cgroup_stat_names[] = {
	"cache",
	"rss",
	"rss_huge",
	"mapped_file",
	"swap",
};

enum mem_cgroup_events_index {
	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
	MEM_CGROUP_EVENTS_PGFAULT,	/* # of page-faults */
	MEM_CGROUP_EVENTS_PGMAJFAULT,	/* # of major page-faults */
	MEM_CGROUP_EVENTS_NSTATS,
};

static const char * const mem_cgroup_events_names[] = {
	"pgpgin",
	"pgpgout",
	"pgfault",
	"pgmajfault",
};

static const char * const mem_cgroup_lru_names[] = {
	"inactive_anon",
	"active_anon",
	"inactive_file",
	"active_file",
	"unevictable",
};

/*
 * The per-memcg event counter is incremented at every pagein/pageout. With
 * THP, it is incremented by the number of pages. This counter is used to
 * trigger some periodic events. This is straightforward and better than
 * using jiffies etc. to handle periodic memcg events.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_NUMAINFO,
	MEM_CGROUP_NTARGETS,
};
#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024
#define NUMAINFO_EVENTS_TARGET	1024
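/*
 * Illustrative note (not in the original source): with the targets above,
 * mem_cgroup_threshold() fires at most once per ~128 page events per cpu,
 * and the NUMA scan info is refreshed at most once per ~1024 page events;
 * see mem_cgroup_event_ratelimit() below for the comparison logic.
 */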

struct mem_cgroup_stat_cpu {
	long count[MEM_CGROUP_STAT_NSTATS];
	unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct mem_cgroup_reclaim_iter {
	/*
	 * last scanned hierarchy member. Valid only if last_dead_count
	 * matches memcg->dead_count of the hierarchy root group.
	 */
	struct mem_cgroup *last_visited;
	unsigned long last_dead_count;

	/* scan generation, increased every round-trip */
	unsigned int generation;
};

/*
 * per-zone information in memory controller.
 */
struct mem_cgroup_per_zone {
	struct lruvec		lruvec;
	unsigned long		lru_size[NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];

	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
						/* use container_of	   */
};

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	u64 threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[0];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};
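/*
 * Illustrative sketch (not in the original source): entries[0] above is a
 * flexible array, so a threshold array with "size" entries is allocated
 * in a single chunk, roughly:
 *
 *	new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
 *		      GFP_KERNEL);
 */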

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * the counter to account for mem+swap usage.
	 */
	struct res_counter memsw;

	/*
	 * the counter to account for kernel memory usage.
	 */
	struct res_counter kmem;
	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;
	unsigned long kmem_account_flags; /* See KMEM_ACCOUNTED_*, below */

	bool		oom_lock;
	atomic_t	under_oom;

	int	swappiness;
	/* OOM-Killer disable */
	int		oom_kill_disable;

	/* set when res.limit == memsw.limit */
	bool		memsw_is_minimum;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup ? And what type of charges should we move ?
	 */
	unsigned long 	move_charge_at_immigrate;
	/*
	 * set > 0 if pages under this cgroup are moving to other cgroup.
	 */
	atomic_t	moving_account;
	/* taken only while moving_account > 0 */
	spinlock_t	move_lock;
	/*
	 * percpu counter.
	 */
	struct mem_cgroup_stat_cpu __percpu *stat;
	/*
	 * used when a cpu is offlined or other synchronizations
	 * See mem_cgroup_read_stat().
	 */
	struct mem_cgroup_stat_cpu nocpu_base;
	spinlock_t pcp_counter_lock;

	atomic_t	dead_count;
#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
	struct tcp_memcontrol tcp_mem;
#endif
#if defined(CONFIG_MEMCG_KMEM)
	/* analogous to slab_common's slab_caches list. per-memcg */
	struct list_head memcg_slab_caches;
	/* Not a spinlock, we can take a lot of time walking the list */
	struct mutex slab_caches_mutex;
        /* Index in the kmem_cache->memcg_params->memcg_caches array */
	int kmemcg_id;
#endif

	int last_scanned_node;
#if MAX_NUMNODES > 1
	nodemask_t	scan_nodes;
	atomic_t	numainfo_events;
	atomic_t	numainfo_updating;
#endif

	struct mem_cgroup_per_node *nodeinfo[0];
	/* WARNING: nodeinfo must be the last member here */
};

static size_t memcg_size(void)
{
	return sizeof(struct mem_cgroup) +
		nr_node_ids * sizeof(struct mem_cgroup_per_node);
}
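/*
 * Illustrative note (not in the original source): nodeinfo[0] is a
 * flexible array, so with nr_node_ids == 2 this evaluates to
 * sizeof(struct mem_cgroup) + 2 * sizeof(struct mem_cgroup_per_node);
 * this is also why nodeinfo must stay the last member of struct mem_cgroup.
 */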

/* internal only representation about the status of kmem accounting. */
enum {
	KMEM_ACCOUNTED_ACTIVE = 0, /* accounted by this cgroup itself */
	KMEM_ACCOUNTED_ACTIVATED, /* static key enabled. */
	KMEM_ACCOUNTED_DEAD, /* dead memcg with pending kmem charges */
};

/* We account when limit is on, but only after call sites are patched */
#define KMEM_ACCOUNTED_MASK \
		((1 << KMEM_ACCOUNTED_ACTIVE) | (1 << KMEM_ACCOUNTED_ACTIVATED))

#ifdef CONFIG_MEMCG_KMEM
static inline void memcg_kmem_set_active(struct mem_cgroup *memcg)
{
	set_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
}

static bool memcg_kmem_is_active(struct mem_cgroup *memcg)
{
	return test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
}

static void memcg_kmem_set_activated(struct mem_cgroup *memcg)
{
	set_bit(KMEM_ACCOUNTED_ACTIVATED, &memcg->kmem_account_flags);
}

static void memcg_kmem_clear_activated(struct mem_cgroup *memcg)
{
	clear_bit(KMEM_ACCOUNTED_ACTIVATED, &memcg->kmem_account_flags);
}

static void memcg_kmem_mark_dead(struct mem_cgroup *memcg)
{
	/*
	 * Our caller must use css_get() first, because memcg_uncharge_kmem()
	 * will call css_put() if it sees the memcg is dead.
	 */
	smp_wmb();
	if (test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags))
		set_bit(KMEM_ACCOUNTED_DEAD, &memcg->kmem_account_flags);
}

static bool memcg_kmem_test_and_clear_dead(struct mem_cgroup *memcg)
{
	return test_and_clear_bit(KMEM_ACCOUNTED_DEAD,
				  &memcg->kmem_account_flags);
}
#endif

/* Stuff for moving charges at task migration. */
/*
 * Types of charges to be moved. "move_charge_at_immigrate" and
 * "immigrate_flags" are treated as a left-shifted bitmap of these types.
 */
enum move_type {
	MOVE_CHARGE_TYPE_ANON,	/* private anonymous page and swap of it */
	MOVE_CHARGE_TYPE_FILE,	/* file page(including tmpfs) and swap of it */
	NR_MOVE_TYPE,
};

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long immigrate_flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

static bool move_anon(void)
{
	return test_bit(MOVE_CHARGE_TYPE_ANON, &mc.immigrate_flags);
}

static bool move_file(void)
{
	return test_bit(MOVE_CHARGE_TYPE_FILE, &mc.immigrate_flags);
}

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_ANON,
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
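/*
 * Illustrative sketch (not in the original source) of the encoding above:
 *
 *	unsigned long priv = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT);
 *
 * MEMFILE_TYPE(priv) then yields _MEMSWAP (the res_type in the upper 16
 * bits) and MEMFILE_ATTR(priv) yields RES_LIMIT (the RES_* attribute in
 * the lower 16 bits).
 */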
/* Used for OOM notifier */
#define OOM_CONTROL		(0)

/*
 * Reclaim flags for mem_cgroup_hierarchical_reclaim
 */
#define MEM_CGROUP_RECLAIM_NOSWAP_BIT	0x0
#define MEM_CGROUP_RECLAIM_NOSWAP	(1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
#define MEM_CGROUP_RECLAIM_SHRINK_BIT	0x1
#define MEM_CGROUP_RECLAIM_SHRINK	(1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)

/*
 * The memcg_create_mutex will be held whenever a new cgroup is created.
 * As a consequence, any change that needs to protect against new child cgroups
 * appearing has to hold it as well.
 */
static DEFINE_MUTEX(memcg_create_mutex);

struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *s)
{
	return s ? container_of(s, struct mem_cgroup, css) : NULL;
}

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

struct vmpressure *css_to_vmpressure(struct cgroup_subsys_state *css)
{
	return &mem_cgroup_from_css(css)->vmpressure;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

/* Writing them here to avoid exposing memcg's inner layout */
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)

void sock_update_memcg(struct sock *sk)
{
	if (mem_cgroup_sockets_enabled) {
		struct mem_cgroup *memcg;
		struct cg_proto *cg_proto;

		BUG_ON(!sk->sk_prot->proto_cgroup);

		/* Socket cloning can throw us here with sk_cgrp already
		 * filled. It won't however, necessarily happen from
		 * process context. So the test for root memcg given
		 * the current task's memcg won't help us in this case.
		 *
		 * Respecting the original socket's memcg is a better
		 * decision in this case.
		 */
		if (sk->sk_cgrp) {
			BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
			css_get(&sk->sk_cgrp->memcg->css);
			return;
		}

		rcu_read_lock();
		memcg = mem_cgroup_from_task(current);
		cg_proto = sk->sk_prot->proto_cgroup(memcg);
		if (!mem_cgroup_is_root(memcg) &&
		    memcg_proto_active(cg_proto) && css_tryget(&memcg->css)) {
			sk->sk_cgrp = cg_proto;
		}
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(sock_update_memcg);

void sock_release_memcg(struct sock *sk)
{
	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
		struct mem_cgroup *memcg;
		WARN_ON(!sk->sk_cgrp->memcg);
		memcg = sk->sk_cgrp->memcg;
		css_put(&sk->sk_cgrp->memcg->css);
	}
}

struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg || mem_cgroup_is_root(memcg))
		return NULL;

	return &memcg->tcp_mem.cg_proto;
}
EXPORT_SYMBOL(tcp_proto_cgroup);

static void disarm_sock_keys(struct mem_cgroup *memcg)
{
	if (!memcg_proto_activated(&memcg->tcp_mem.cg_proto))
		return;
	static_key_slow_dec(&memcg_socket_limit_enabled);
}
#else
static void disarm_sock_keys(struct mem_cgroup *memcg)
{
}
#endif

#ifdef CONFIG_MEMCG_KMEM
/*
 * This will be the memcg's index in each cache's ->memcg_params->memcg_caches.
 * There are two main reasons for not using the css_id for this:
 *  1) this works better in sparse environments, where we have a lot of memcgs,
 *     but only a few kmem-limited. Also, if we had, for instance, 200
 *     memcgs, and none but the 200th were kmem-limited, we'd have to have a
 *     200-entry array for that.
 *
 *  2) In order not to violate the cgroup API, we would like to do all memory
 *     allocation in ->create(). At that point, we haven't yet allocated the
 *     css_id. Having a separate index prevents us from messing with the cgroup
 *     core for this.
 *
 * The current size of the caches array is stored in
 * memcg_limited_groups_array_size.  It will double each time we have to
 * increase it.
 */
static DEFINE_IDA(kmem_limited_groups);
int memcg_limited_groups_array_size;

/*
 * MIN_SIZE is different than 1, because we would like to avoid going through
 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 * cgroups is a reasonable guess. In the future, it could be a parameter or
 * tunable, but that is strictly not necessary.
 *
 * MAX_SIZE should be as large as the number of css_ids. Ideally, we could get
 * this constant directly from cgroup, but it is understandable that this is
 * better kept as an internal representation in cgroup.c. In any case, the
 * css_id space is not getting any smaller, and we don't have to necessarily
 * increase ours as well if it increases.
 */
#define MEMCG_CACHES_MIN_SIZE 4
#define MEMCG_CACHES_MAX_SIZE 65535

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
 * conditional on this static branch, we have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
struct static_key memcg_kmem_enabled_key;
EXPORT_SYMBOL(memcg_kmem_enabled_key);

static void disarm_kmem_keys(struct mem_cgroup *memcg)
{
	if (memcg_kmem_is_active(memcg)) {
		static_key_slow_dec(&memcg_kmem_enabled_key);
		ida_simple_remove(&kmem_limited_groups, memcg->kmemcg_id);
	}
	/*
	 * This check can't live in kmem destruction function,
	 * since the charges will outlive the cgroup
	 */
	WARN_ON(res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0);
}
#else
static void disarm_kmem_keys(struct mem_cgroup *memcg)
{
}
#endif /* CONFIG_MEMCG_KMEM */

static void disarm_static_keys(struct mem_cgroup *memcg)
{
	disarm_sock_keys(memcg);
	disarm_kmem_keys(memcg);
}

static void drain_all_stock_async(struct mem_cgroup *memcg);

static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid)
{
	VM_BUG_ON((unsigned)nid >= nr_node_ids);
	return &memcg->nodeinfo[nid]->zoneinfo[zid];
}

struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
{
	return &memcg->css;
}

static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return mem_cgroup_zoneinfo(memcg, nid, zid);
}

/*
 * Implementation Note: reading percpu statistics for memcg.
 *
 * Both vmstat[] and percpu_counter use thresholds and periodic
 * synchronization to implement a "quick" read. There is a trade-off between
 * reading cost and precision of the value, so we could likewise implement
 * periodic synchronization of the counters in memcg.
 *
 * But this _read() function is used for the user interface now. Users account
 * memory usage by memory cgroup and _always_ require an exact value, because
 * they account memory. Even if we provided a quick-and-fuzzy read, we would
 * always have to visit all online cpus and compute the sum. So, for now,
 * unnecessary synchronization is not implemented. (It is only implemented
 * for cpu hotplug.)
 *
 * If there are kernel-internal users that can make use of a not-exact value,
 * and reading all cpu values becomes a performance bottleneck in some common
 * workload, thresholds and synchronization as in vmstat[] should be
 * implemented.
 */
static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
				 enum mem_cgroup_stat_index idx)
{
	long val = 0;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		val += per_cpu(memcg->stat->count[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&memcg->pcp_counter_lock);
	val += memcg->nocpu_base.count[idx];
	spin_unlock(&memcg->pcp_counter_lock);
#endif
	put_online_cpus();
	return val;
}
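/*
 * Illustrative usage sketch (not in the original source): the exact
 * all-cpu sum above backs user-visible numbers such as the anon rss
 * reported in memory.stat, in pages:
 *
 *	long rss = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_RSS);
 */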

static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
					 bool charge)
{
	int val = (charge) ? 1 : -1;
	this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
}

static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
					    enum mem_cgroup_events_index idx)
{
	unsigned long val = 0;
	int cpu;

	for_each_online_cpu(cpu)
		val += per_cpu(memcg->stat->events[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&memcg->pcp_counter_lock);
	val += memcg->nocpu_base.events[idx];
	spin_unlock(&memcg->pcp_counter_lock);
#endif
	return val;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 struct page *page,
					 bool anon, int nr_pages)
{
	preempt_disable();

	/*
	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
	 * counted as CACHE even if it's on ANON LRU.
	 */
	if (anon)
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
				nr_pages);
	else
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
				nr_pages);

	if (PageTransHuge(page))
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
				nr_pages);

	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
	else {
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);

	preempt_enable();
}

unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	struct mem_cgroup_per_zone *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
	return mz->lru_size[lru];
}

static unsigned long
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
			unsigned int lru_mask)
{
	struct mem_cgroup_per_zone *mz;
	enum lru_list lru;
	unsigned long ret = 0;

	mz = mem_cgroup_zoneinfo(memcg, nid, zid);

	for_each_lru(lru) {
		if (BIT(lru) & lru_mask)
			ret += mz->lru_size[lru];
	}
	return ret;
}

static unsigned long
mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
			int nid, unsigned int lru_mask)
{
	u64 total = 0;
	int zid;

	for (zid = 0; zid < MAX_NR_ZONES; zid++)
		total += mem_cgroup_zone_nr_lru_pages(memcg,
						nid, zid, lru_mask);

	return total;
}

static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
			unsigned int lru_mask)
{
	int nid;
	u64 total = 0;

	for_each_node_state(nid, N_MEMORY)
		total += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
	return total;
}
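/*
 * Illustrative note (not in the original source): lru_mask is a bitmap of
 * BIT(lru) values, so for example
 *
 *	mem_cgroup_nr_lru_pages(memcg, LRU_ALL_FILE);
 *
 * sums the inactive and active file lists over every node and zone.
 */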

static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->stat->nr_page_events);
	next = __this_cpu_read(memcg->stat->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)next - (long)val < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_NUMAINFO:
			next = val + NUMAINFO_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->stat->targets[target], next);
		return true;
	}
	return false;
}
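/*
 * Illustrative note (not in the original source): the signed subtraction
 * above is the time_after() idiom, so the check stays correct even when
 * the per-cpu event counter wraps, e.g.:
 *
 *	val  = ULONG_MAX	(counter about to wrap)
 *	next = 126		(target computed before the wrap)
 *
 * (long)next - (long)val = 127 > 0, so the target is not yet reached.
 */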

/*
 * Check events in order.
 *
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
	preempt_disable();
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_numainfo __maybe_unused;

#if MAX_NUMNODES > 1
		do_numainfo = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_NUMAINFO);
#endif
		preempt_enable();

		mem_cgroup_threshold(memcg);
#if MAX_NUMNODES > 1
		if (unlikely(do_numainfo))
			atomic_inc(&memcg->numainfo_events);
#endif
	} else
		preempt_enable();
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, mem_cgroup_subsys_id));
}

struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg = NULL;

	if (!mm)
		return NULL;
	/*
	 * Because we have no locks, mm->owner's may be being moved to other
	 * cgroup. We use css_tryget() here even if this looks
	 * pessimistic (rather than adding locks here).
	 */
	rcu_read_lock();
	do {
		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
		if (unlikely(!memcg))
			break;
	} while (!css_tryget(&memcg->css));
	rcu_read_unlock();
	return memcg;
}

/*
 * Returns a next (in a pre-order walk) alive memcg (with elevated css
 * ref. count) or NULL if the whole root's subtree has been visited.
 *
 * helper function to be used by mem_cgroup_iter
 */
static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,
		struct mem_cgroup *last_visited)
{
	struct cgroup_subsys_state *prev_css, *next_css;

	prev_css = last_visited ? &last_visited->css : NULL;
skip_node:
	next_css = css_next_descendant_pre(prev_css, &root->css);

	/*
	 * Even if we found a group we have to make sure it is
	 * alive. css && !memcg means that the groups should be
	 * skipped and we should continue the tree walk.
	 * last_visited css is safe to use because it is
	 * protected by css_get and the tree walk is rcu safe.
	 */
	if (next_css) {
		struct mem_cgroup *mem = mem_cgroup_from_css(next_css);

		if (css_tryget(&mem->css))
			return mem;
		else {
			prev_css = next_css;
			goto skip_node;
		}
	}

	return NULL;
}

static void mem_cgroup_iter_invalidate(struct mem_cgroup *root)
{
	/*
	 * When a group in the hierarchy below root is destroyed, the
	 * hierarchy iterator can no longer be trusted since it might
	 * have pointed to the destroyed group.  Invalidate it.
	 */
	atomic_inc(&root->dead_count);
}

static struct mem_cgroup *
mem_cgroup_iter_load(struct mem_cgroup_reclaim_iter *iter,
		     struct mem_cgroup *root,
		     int *sequence)
{
	struct mem_cgroup *position = NULL;
	/*
	 * A cgroup destruction happens in two stages: offlining and
	 * release.  They are separated by a RCU grace period.
	 *
	 * If the iterator is valid, we may still race with an
	 * offlining.  The RCU lock ensures the object won't be
	 * released, tryget will fail if we lost the race.
	 */
	*sequence = atomic_read(&root->dead_count);
	if (iter->last_dead_count == *sequence) {
		smp_rmb();
		position = iter->last_visited;
		if (position && !css_tryget(&position->css))
			position = NULL;
	}
	return position;
}

static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter,
				   struct mem_cgroup *last_visited,
				   struct mem_cgroup *new_position,
				   int sequence)
{
	if (last_visited)
		css_put(&last_visited->css);
	/*
	 * We store the sequence count from the time @last_visited was
	 * loaded successfully instead of rereading it here so that we
	 * don't lose destruction events in between.  We could have
	 * raced with the destruction of @new_position after all.
	 */
	iter->last_visited = new_position;
	smp_wmb();
	iter->last_dead_count = sequence;
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a zone and a priority level in @reclaim to
 * divide up the memcgs in the hierarchy among all concurrent
 * reclaimers operating on the same zone and priority.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *last_visited = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	if (prev && !reclaim)
		last_visited = prev;

	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
			goto out_css_put;
		return root;
	}

	rcu_read_lock();
	while (!memcg) {
		struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
		int uninitialized_var(seq);

		if (reclaim) {
			int nid = zone_to_nid(reclaim->zone);
			int zid = zone_idx(reclaim->zone);
			struct mem_cgroup_per_zone *mz;

			mz = mem_cgroup_zoneinfo(root, nid, zid);
			iter = &mz->reclaim_iter[reclaim->priority];
			if (prev && reclaim->generation != iter->generation) {
				iter->last_visited = NULL;
				goto out_unlock;
			}

			last_visited = mem_cgroup_iter_load(iter, root, &seq);
		}

		memcg = __mem_cgroup_iter_next(root, last_visited);

		if (reclaim) {
			mem_cgroup_iter_update(iter, last_visited, memcg, seq);

			if (!memcg)
				iter->generation++;
			else if (!prev && memcg)
				reclaim->generation = iter->generation;
		}

		if (prev && !memcg)
			goto out_unlock;
	}
out_unlock:
	rcu_read_unlock();
out_css_put:
	if (prev && prev != root)
		css_put(&prev->css);

	return memcg;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

/*
 * Iteration constructs for visiting all cgroups (under a tree).  If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))
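/*
 * Illustrative usage sketch (not in the original source): summing a
 * per-memcg statistic over a whole hierarchy with the iterators above:
 *
 *	struct mem_cgroup *iter;
 *	long rss = 0;
 *
 *	for_each_mem_cgroup_tree(iter, memcg)
 *		rss += mem_cgroup_read_stat(iter, MEM_CGROUP_STAT_RSS);
 *
 * Leaving such a loop early must instead go through
 * mem_cgroup_iter_break() to drop the reference held on "iter".
 */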

void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (unlikely(!memcg))
		goto out;

	switch (idx) {
	case PGFAULT:
		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
		break;
	case PGMAJFAULT:
		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
		break;
	default:
		BUG();
	}
out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(__mem_cgroup_count_vm_event);

/**
 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
 * @zone: zone of the wanted lruvec
 * @memcg: memcg of the wanted lruvec
 *
 * Returns the lru list vector holding pages for the given @zone and
 * @mem.  This can be the global zone lruvec, if the memory controller
 * is disabled.
 */
struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
				      struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_zone *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &zone->lruvec;
		goto out;
	}

	mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone));
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->zone here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->zone != zone))
		lruvec->zone = zone;
	return lruvec;
}

/*
 * The following LRU functions are allowed to be used without PCG_LOCK.
 * Operations are called by global LRU routines independently of memcg.
 * What we have to take care of here is the validity of pc->mem_cgroup.
 *
 * Changes to pc->mem_cgroup happen on
 * 1. charge
 * 2. moving account
 * In the typical case, "charge" is done before add-to-lru. The exception is
 * SwapCache, which is added to the LRU before being charged.
 * If the PCG_USED bit is not set, the page_cgroup is not added to this
 * private LRU. When moving account, the page is not on the LRU; it's isolated.
 */

/**
 * mem_cgroup_page_lruvec - return lruvec for adding an lru page
 * @page: the page
 * @zone: zone of the page
 */
struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
{
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup *memcg;
	struct page_cgroup *pc;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &zone->lruvec;
		goto out;
	}

	pc = lookup_page_cgroup(page);
	memcg = pc->mem_cgroup;

	/*
	 * Surreptitiously switch any uncharged offlist page to root:
	 * an uncharged page off lru does nothing to secure
	 * its former mem_cgroup from sudden removal.
	 *
	 * Our caller holds lru_lock, and PageCgroupUsed is updated
	 * under page_cgroup lock: between them, they make all uses
	 * of pc->mem_cgroup safe.
	 */
	if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup)
		pc->mem_cgroup = memcg = root_mem_cgroup;

	mz = page_cgroup_zoneinfo(memcg, page);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->zone here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->zone != zone))
		lruvec->zone = zone;
	return lruvec;
}

/**
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @nr_pages: positive when adding or negative when removing
 *
 * This function must be called when a page is added to or removed from an
 * lru list.
 */
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int nr_pages)
{
	struct mem_cgroup_per_zone *mz;
	unsigned long *lru_size;

	if (mem_cgroup_disabled())
		return;

	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
	lru_size = mz->lru_size + lru;
	*lru_size += nr_pages;
	VM_BUG_ON((long)(*lru_size) < 0);
}

/*
 * Checks whether given mem is same or in the root_mem_cgroup's
 * hierarchy subtree
 */
bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
				  struct mem_cgroup *memcg)
{
	if (root_memcg == memcg)
		return true;
	if (!root_memcg->use_hierarchy || !memcg)
		return false;
	return css_is_ancestor(&memcg->css, &root_memcg->css);
}

static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
				       struct mem_cgroup *memcg)
{
	bool ret;

	rcu_read_lock();
	ret = __mem_cgroup_same_or_subtree(root_memcg, memcg);
	rcu_read_unlock();
	return ret;
}

bool task_in_mem_cgroup(struct task_struct *task,
			const struct mem_cgroup *memcg)
{
	struct mem_cgroup *curr = NULL;
	struct task_struct *p;
	bool ret;

	p = find_lock_task_mm(task);
	if (p) {
		curr = try_get_mem_cgroup_from_mm(p->mm);
		task_unlock(p);
	} else {
		/*
		 * All threads may have already detached their mm's, but the oom
		 * killer still needs to detect if they have already been oom
		 * killed to prevent needlessly killing additional tasks.
		 */
		rcu_read_lock();
		curr = mem_cgroup_from_task(task);
		if (curr)
			css_get(&curr->css);
		rcu_read_unlock();
	}
	if (!curr)
		return false;
	/*
	 * We should check use_hierarchy of "memcg", not "curr". Checking
	 * use_hierarchy of "curr" here would make this function return true if
	 * hierarchy is enabled in "curr" and "curr" is a child of "memcg" in
	 * the *cgroup* hierarchy (even if use_hierarchy is disabled in "memcg").
	 */
	ret = mem_cgroup_same_or_subtree(memcg, curr);
	css_put(&curr->css);
	return ret;
}

int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
{
	unsigned long inactive_ratio;
	unsigned long inactive;
	unsigned long active;
	unsigned long gb;

	inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
	active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);

	gb = (inactive + active) >> (30 - PAGE_SHIFT);
	if (gb)
		inactive_ratio = int_sqrt(10 * gb);
	else
		inactive_ratio = 1;

	return inactive * inactive_ratio < active;
}
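/*
 * Illustrative worked example (not in the original source): with 8GB of
 * anon pages on this lruvec, gb = 8 and inactive_ratio = int_sqrt(80) = 8,
 * so the inactive list is considered low while it holds less than 1/9 of
 * the anon pages (inactive * 8 < active). At 1GB and below the ratio is 1
 * and the two lists are compared directly.
 */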

#define mem_cgroup_from_res_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @mem can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
	unsigned long long margin;

	margin = res_counter_margin(&memcg->res);
	if (do_swap_account)
		margin = min(margin, res_counter_margin(&memcg->memsw));
	return margin >> PAGE_SHIFT;
}
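/*
 * Illustrative worked example (not in the original source): with a 100MB
 * limit and 60MB of usage, res_counter_margin() returns 40MB and this
 * function reports 10240 chargeable 4K pages. With swap accounting
 * enabled, the smaller of the mem and mem+swap margins wins.
 */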

int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	/* root ? */
	if (!css_parent(&memcg->css))
		return vm_swappiness;

	return memcg->swappiness;
}

/*
 * memcg->moving_account is used to check the possibility that some thread is
 * calling move_account(). When a thread on CPU-A starts moving pages under
 * a memcg, other threads should check memcg->moving_account under
 * rcu_read_lock(), like this:
 *
 *         CPU-A                                    CPU-B
 *                                              rcu_read_lock()
 *         memcg->moving_account+1              if (memcg->moving_account)
 *                                                   take heavy locks.
 *         synchronize_rcu()                    update something.
 *                                              rcu_read_unlock()
 *         start move here.
 */

/* for quick checking without looking up memcg */
atomic_t memcg_moving __read_mostly;

static void mem_cgroup_start_move(struct mem_cgroup *memcg)
{
	atomic_inc(&memcg_moving);
	atomic_inc(&memcg->moving_account);
	synchronize_rcu();
}

static void mem_cgroup_end_move(struct mem_cgroup *memcg)
{
	/*
	 * Now, mem_cgroup_clear_mc() may call this function with NULL.
	 * We check NULL in callee rather than caller.
	 */
	if (memcg) {
		atomic_dec(&memcg_moving);
		atomic_dec(&memcg->moving_account);
	}
}

/*
 * 2 routines for checking "mem" is under move_account() or not.
 *
 * mem_cgroup_stolen() -  checking whether a cgroup is mc.from or not. This
 *			  is used for avoiding races in accounting.  If true,
 *			  pc->mem_cgroup may be overwritten.
 *
 * mem_cgroup_under_move() - checking whether a cgroup is mc.from or mc.to or
 *			  under the hierarchy of moving cgroups. This is used
 *			  for waiting at high memory pressure caused by "move".
 */

static bool mem_cgroup_stolen(struct mem_cgroup *memcg)
{
	VM_BUG_ON(!rcu_read_lock_held());
	return atomic_read(&memcg->moving_account) > 0;
}

static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	bool ret = false;
	/*
	 * Unlike task_move routines, we access mc.to, mc.from not under
	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;

	ret = mem_cgroup_same_or_subtree(memcg, from)
		|| mem_cgroup_same_or_subtree(memcg, to);
unlock:
	spin_unlock(&mc.lock);
	return ret;
}

static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
{
	if (mc.moving_task && current != mc.moving_task) {
		if (mem_cgroup_under_move(memcg)) {
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}

/*
 * Take this lock when
 * - a code tries to modify page's memcg while it's USED.
 * - a code tries to modify page state accounting in a memcg.
 * see mem_cgroup_stolen(), too.
 */
static void move_lock_mem_cgroup(struct mem_cgroup *memcg,
				  unsigned long *flags)
{
	spin_lock_irqsave(&memcg->move_lock, *flags);
}

static void move_unlock_mem_cgroup(struct mem_cgroup *memcg,
				unsigned long *flags)
{
	spin_unlock_irqrestore(&memcg->move_lock, *flags);
}

#define K(x) ((x) << (PAGE_SHIFT-10))
/**
 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
	struct cgroup *task_cgrp;
	struct cgroup *mem_cgrp;
	/*
	 * Need a buffer in BSS, can't rely on allocations. The code relies
	 * on the assumption that OOM is serialized for memory controller.
	 * If this assumption is broken, revisit this code.
	 */
	static char memcg_name[PATH_MAX];
	int ret;
	struct mem_cgroup *iter;
	unsigned int i;

	if (!p)
		return;

	rcu_read_lock();

	mem_cgrp = memcg->css.cgroup;
	task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);

	ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
	if (ret < 0) {
		/*
		 * Unfortunately, we are unable to convert to a useful name
		 * But we'll still print out the usage information
		 */
		rcu_read_unlock();
		goto done;
	}
	rcu_read_unlock();

	pr_info("Task in %s killed", memcg_name);

	rcu_read_lock();
	ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
	if (ret < 0) {
		rcu_read_unlock();
		goto done;
	}
	rcu_read_unlock();

	/*
	 * Continues from above, so we don't need a KERN_ level
	 */
	pr_cont(" as a result of limit of %s\n", memcg_name);
done:

	pr_info("memory: usage %llukB, limit %llukB, failcnt %llu\n",
		res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
		res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
		res_counter_read_u64(&memcg->res, RES_FAILCNT));
	pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %llu\n",
		res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
		res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
		res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
	pr_info("kmem: usage %llukB, limit %llukB, failcnt %llu\n",
		res_counter_read_u64(&memcg->kmem, RES_USAGE) >> 10,
		res_counter_read_u64(&memcg->kmem, RES_LIMIT) >> 10,
		res_counter_read_u64(&memcg->kmem, RES_FAILCNT));

	for_each_mem_cgroup_tree(iter, memcg) {
		pr_info("Memory cgroup stats");

		rcu_read_lock();
		ret = cgroup_path(iter->css.cgroup, memcg_name, PATH_MAX);
		if (!ret)
			pr_cont(" for %s", memcg_name);
		rcu_read_unlock();
		pr_cont(":");

		for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
			if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
				continue;
			pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i],
				K(mem_cgroup_read_stat(iter, i)));
		}

		for (i = 0; i < NR_LRU_LISTS; i++)
			pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
				K(mem_cgroup_nr_lru_pages(iter, BIT(i))));

		pr_cont("\n");
	}
}

/*
 * This function returns the number of memcg under hierarchy tree. Returns
 * 1(self count) if no children.
 */
static int mem_cgroup_count_children(struct mem_cgroup *memcg)
{
	int num = 0;
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg)
		num++;
	return num;
}

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
static u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
	u64 limit;

	limit = res_counter_read_u64(&memcg->res, RES_LIMIT);

	/*
	 * Do not consider swap space if we cannot swap due to swappiness
	 */
	if (mem_cgroup_swappiness(memcg)) {
		u64 memsw;

		limit += total_swap_pages << PAGE_SHIFT;
		memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);

		/*
		 * If memsw is finite and limits the amount of swap space
		 * available to this memcg, return that limit.
		 */
		limit = min(limit, memsw);
	}

	return limit;
}
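/*
 * Illustrative worked example (not in the original source): with a 1GB
 * memory limit, 2GB of total swap and a 1.5GB mem+swap limit, the code
 * above computes min(1GB + 2GB, 1.5GB) = 1.5GB, so OOM badness is scaled
 * by the stricter of the two counters. With swappiness == 0, the result
 * is just the 1GB memory limit.
 */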

static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order)
{
	struct mem_cgroup *iter;
	unsigned long chosen_points = 0;
	unsigned long totalpages;
	unsigned int points = 0;
	struct task_struct *chosen = NULL;

	/*
	 * If current has a pending SIGKILL or is exiting, then automatically
	 * select it.  The goal is to allow it to allocate so that it may
	 * quickly exit and free its memory.
	 */
	if (fatal_signal_pending(current) || current->flags & PF_EXITING) {
		set_thread_flag(TIF_MEMDIE);
		return;
	}

	check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
	totalpages = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1;
	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, &it);
		while ((task = css_task_iter_next(&it))) {
			switch (oom_scan_process_thread(task, totalpages, NULL,
							false)) {
			case OOM_SCAN_SELECT:
				if (chosen)
					put_task_struct(chosen);
				chosen = task;
				chosen_points = ULONG_MAX;
				get_task_struct(chosen);
				/* fall through */
			case OOM_SCAN_CONTINUE:
				continue;
			case OOM_SCAN_ABORT:
				css_task_iter_end(&it);
				mem_cgroup_iter_break(memcg, iter);
				if (chosen)
					put_task_struct(chosen);
				return;
			case OOM_SCAN_OK:
				break;
			};
			points = oom_badness(task, memcg, NULL, totalpages);
			if (points > chosen_points) {
				if (chosen)
					put_task_struct(chosen);
				chosen = task;
				chosen_points = points;
				get_task_struct(chosen);
			}
		}
		css_task_iter_end(&it);
	}

	if (!chosen)
		return;
	points = chosen_points * 1000 / totalpages;
	oom_kill_process(chosen, gfp_mask, order, points, totalpages, memcg,
			 NULL, "Memory cgroup out of memory");
}

static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg,
					gfp_t gfp_mask,
					unsigned long flags)
{
	unsigned long total = 0;
	bool noswap = false;
	int loop;

	if (flags & MEM_CGROUP_RECLAIM_NOSWAP)
		noswap = true;
	if (!(flags & MEM_CGROUP_RECLAIM_SHRINK) && memcg->memsw_is_minimum)
		noswap = true;

	for (loop = 0; loop < MEM_CGROUP_MAX_RECLAIM_LOOPS; loop++) {
		if (loop)
			drain_all_stock_async(memcg);
		total += try_to_free_mem_cgroup_pages(memcg, gfp_mask, noswap);
		/*
		 * Allow limit shrinkers, which are triggered directly
		 * by userspace, to catch signals and stop reclaim
		 * after minimal progress, regardless of the margin.
		 */
		if (total && (flags & MEM_CGROUP_RECLAIM_SHRINK))
			break;
		if (mem_cgroup_margin(memcg))
			break;
		/*
		 * If nothing was reclaimed after two attempts, there
		 * may be no reclaimable pages in this hierarchy.
		 */
		if (loop && !total)
			break;
	}
	return total;
}
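/*
 * Illustrative usage sketch (not in the original source): a limit
 * shrinker, e.g. the path behind writing memory.limit_in_bytes, would
 * call this roughly as
 *
 *	mem_cgroup_reclaim(memcg, GFP_KERNEL, MEM_CGROUP_RECLAIM_SHRINK);
 *
 * so that reclaim remains interruptible and stops after minimal progress,
 * as described in the comments above.
 */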

#if MAX_NUMNODES > 1
/**
 * test_mem_cgroup_node_reclaimable
 * @memcg: the target memcg
 * @nid: the node ID to be checked.
 * @noswap : specify true here if the user wants file-only information.
 *
 * This function returns whether the specified memcg contains any
 * reclaimable pages on a node. Returns true if there are any reclaimable
 * pages in the node.
 */
static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
		int nid, bool noswap)
{
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
		return true;
	if (noswap || !total_swap_pages)
		return false;
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
		return true;
	return false;

}

/*
 * Always updating the nodemask is not very good - even if we have an empty
 * list or the wrong list here, we can start from some node and traverse all
 * nodes based on the zonelist. So update the list loosely once per 10 secs.
 *
 */
static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
{
	int nid;
	/*
	 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
	 * pagein/pageout changes since the last update.
	 */
	if (!atomic_read(&memcg->numainfo_events))
		return;
	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
		return;

	/* make a nodemask where this memcg uses memory from */
	memcg->scan_nodes = node_states[N_MEMORY];

	for_each_node_mask(nid, node_states[N_MEMORY]) {

		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
			node_clear(nid, memcg->scan_nodes);
	}

	atomic_set(&memcg->numainfo_events, 0);
	atomic_set(&memcg->numainfo_updating, 0);
}

/*
 * Selecting a node where we start reclaim from. Because what we need is just
 * reducing the usage counter, starting from anywhere is OK. Considering
 * memory reclaim from the current node, there are pros and cons:
 *
 * Freeing memory from the current node means freeing memory from a node which
 * we'll use or we've used. So, it may make the LRU bad. And if several threads
 * hit limits, they will see contention on a node. But freeing from a remote
 * node means more costs for memory reclaim because of memory latency.
 *
 * Now, we use round-robin. A better algorithm is welcome.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	int node;

	mem_cgroup_may_update_nodemask(memcg);
	node = memcg->last_scanned_node;

	node = next_node(node, memcg->scan_nodes);
	if (node == MAX_NUMNODES)
		node = first_node(memcg->scan_nodes);
	/*
	 * We call this when we hit limit, not when pages are added to LRU.
	 * No LRU may hold pages because all pages are UNEVICTABLE or
	 * memcg is too small and all pages are not on LRU. In that case,
	 * we use the current node.
	 */
	if (unlikely(node == MAX_NUMNODES))
		node = numa_node_id();

	memcg->last_scanned_node = node;
	return node;
}
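
/*
 * Worked example (illustrative): with scan_nodes = {0,2,3} and
 * last_scanned_node == 2, successive calls return 3, then 0, then 2,
 * cycling round-robin through the reclaimable nodes. If scan_nodes is
 * empty (e.g. everything is unevictable), numa_node_id() is used.
 */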

#else
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	return 0;
}

#endif

/*
 * A group is eligible for the soft limit reclaim under the given root
 * hierarchy if
 *	a) it is over its soft limit
 *	b) any parent up the hierarchy is over its soft limit
 */
bool mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
		struct mem_cgroup *root)
{
	struct mem_cgroup *parent = memcg;

	if (res_counter_soft_limit_excess(&memcg->res))
		return true;

	/*
	 * If any parent up to the root in the hierarchy is over its soft limit
	 * then we have to obey and reclaim from this group as well.
	 */
	while ((parent = parent_mem_cgroup(parent))) {
		if (res_counter_soft_limit_excess(&parent->res))
			return true;
		if (parent == root)
			break;
	}

	return false;
}
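
/*
 * Example (illustrative): in a hierarchy root -> A -> B where only A
 * exceeds its soft limit, mem_cgroup_soft_reclaim_eligible(B, root)
 * returns true (B is reclaimed from on behalf of its parent), while a
 * sibling subtree with no excess anywhere stays ineligible.
 */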

/*
 * Check whether the OOM killer is already running under our hierarchy.
 * If someone is running, return false.
 * Has to be called with memcg_oom_lock held.
 */
static bool mem_cgroup_oom_lock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter, *failed = NULL;

	for_each_mem_cgroup_tree(iter, memcg) {
		if (iter->oom_lock) {
			/*
			 * this subtree of our hierarchy is already locked
			 * so we cannot give a lock.
			 */
			failed = iter;
			mem_cgroup_iter_break(memcg, iter);
			break;
		} else
			iter->oom_lock = true;
	}

	if (!failed)
		return true;

	/*
	 * OK, we failed to lock the whole subtree so we have to clean up
	 * what we set up to the failing subtree
	 */
	for_each_mem_cgroup_tree(iter, memcg) {
		if (iter == failed) {
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
		iter->oom_lock = false;
	}
	return false;
}

/*
 * Has to be called with memcg_oom_lock held.
 */
static int mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg)
		iter->oom_lock = false;
	return 0;
}

static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg)
		atomic_inc(&iter->under_oom);
}

static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	/*
	 * When a new child is created while the hierarchy is under oom,
	 * mem_cgroup_oom_lock() may not be called. We have to use
	 * atomic_add_unless() here.
	 */
	for_each_mem_cgroup_tree(iter, memcg)
		atomic_add_unless(&iter->under_oom, -1, 0);
}

static DEFINE_SPINLOCK(memcg_oom_lock);
static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);

struct oom_wait_info {
	struct mem_cgroup *memcg;
	wait_queue_t	wait;
};

static int memcg_oom_wake_function(wait_queue_t *wait,
	unsigned mode, int sync, void *arg)
{
	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
	struct mem_cgroup *oom_wait_memcg;
	struct oom_wait_info *oom_wait_info;

	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
	oom_wait_memcg = oom_wait_info->memcg;

	/*
	 * Both of oom_wait_info->memcg and wake_memcg are stable under us.
	 * Then we can use css_is_ancestor without taking care of RCU.
	 */
	if (!mem_cgroup_same_or_subtree(oom_wait_memcg, wake_memcg)
		&& !mem_cgroup_same_or_subtree(wake_memcg, oom_wait_memcg))
		return 0;
	return autoremove_wake_function(wait, mode, sync, arg);
}

static void memcg_wakeup_oom(struct mem_cgroup *memcg)
{
	/* for filtering, pass "memcg" as argument. */
	__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
}

static void memcg_oom_recover(struct mem_cgroup *memcg)
{
	if (memcg && atomic_read(&memcg->under_oom))
		memcg_wakeup_oom(memcg);
}

/*
 * try to call OOM killer. returns false if we should exit memory-reclaim loop.
 */
static bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask,
				  int order)
{
	struct oom_wait_info owait;
	bool locked, need_to_kill;

	owait.memcg = memcg;
	owait.wait.flags = 0;
	owait.wait.func = memcg_oom_wake_function;
	owait.wait.private = current;
	INIT_LIST_HEAD(&owait.wait.task_list);
	need_to_kill = true;
	mem_cgroup_mark_under_oom(memcg);

	/* At first, try to OOM lock hierarchy under memcg. */
	spin_lock(&memcg_oom_lock);
	locked = mem_cgroup_oom_lock(memcg);
	/*
	 * Even if signal_pending(), we can't quit charge() loop without
	 * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL
	 * under OOM is always welcomed, use TASK_KILLABLE here.
	 */
	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
	if (!locked || memcg->oom_kill_disable)
		need_to_kill = false;
	if (locked)
		mem_cgroup_oom_notify(memcg);
	spin_unlock(&memcg_oom_lock);

	if (need_to_kill) {
		finish_wait(&memcg_oom_waitq, &owait.wait);
		mem_cgroup_out_of_memory(memcg, mask, order);
	} else {
		schedule();
		finish_wait(&memcg_oom_waitq, &owait.wait);
	}
	spin_lock(&memcg_oom_lock);
	if (locked)
		mem_cgroup_oom_unlock(memcg);
	memcg_wakeup_oom(memcg);
	spin_unlock(&memcg_oom_lock);

	mem_cgroup_unmark_under_oom(memcg);

	if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
		return false;
	/* Give chance to dying process */
	schedule_timeout_uninterruptible(1);
	return true;
}
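
/*
 * Illustrative timeline of the protocol above (not upstream text): a task
 * T1 that wins mem_cgroup_oom_lock() keeps need_to_kill == true and calls
 * mem_cgroup_out_of_memory(); a task T2 that loses the lock sleeps in
 * schedule() on memcg_oom_waitq instead. When T1 unlocks the hierarchy
 * and calls memcg_wakeup_oom(), memcg_oom_wake_function() wakes T2 only
 * if the two memcgs share a hierarchy; both then return true so that the
 * charge attempt is retried.
 */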

/*
 * Currently used to update mapped file statistics, but the routine can be
 * generalized to update other statistics as well.
 *
 * Notes: Race condition
 *
 * We usually use page_cgroup_lock() for accessing page_cgroup member but
 * it tends to be costly. But considering some conditions, we don't need
 * to do so _always_.
 *
 * Considering "charge", lock_page_cgroup() is not required because all
 * file-stat operations happen after a page is attached to radix-tree. There
 * is no race with "charge".
 *
 * Considering "uncharge", we know that memcg doesn't clear pc->mem_cgroup
 * at "uncharge" intentionally. So, we always see valid pc->mem_cgroup even
 * if there is a race with "uncharge". Statistics itself is properly handled
 * by flags.
 *
 * Considering "move", this is the only case where we see a race. To make the
 * race small, we check mm->moving_account and detect whether there is a
 * possibility of a race. If there is, we take a lock.
 */

void __mem_cgroup_begin_update_page_stat(struct page *page,
				bool *locked, unsigned long *flags)
{
	struct mem_cgroup *memcg;
	struct page_cgroup *pc;

	pc = lookup_page_cgroup(page);
again:
	memcg = pc->mem_cgroup;
	if (unlikely(!memcg || !PageCgroupUsed(pc)))
		return;
	/*
	 * If this memory cgroup is not under account moving, we don't
	 * need to take move_lock_mem_cgroup(). Because we already hold
	 * rcu_read_lock(), any calls to move_account will be delayed until
	 * rcu_read_unlock() if mem_cgroup_stolen() == true.
	 */
	if (!mem_cgroup_stolen(memcg))
		return;

	move_lock_mem_cgroup(memcg, flags);
	if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) {
		move_unlock_mem_cgroup(memcg, flags);
		goto again;
	}
	*locked = true;
}

void __mem_cgroup_end_update_page_stat(struct page *page, unsigned long *flags)
{
	struct page_cgroup *pc = lookup_page_cgroup(page);

	/*
	 * It's guaranteed that pc->mem_cgroup never changes while
	 * lock is held because a routine modifying pc->mem_cgroup
	 * should take move_lock_mem_cgroup().
	 */
	move_unlock_mem_cgroup(pc->mem_cgroup, flags);
}
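
/*
 * Sketch of how a caller is expected to drive this protocol (modelled on
 * the rmap code; the wrappers without the "__" prefix live in
 * memcontrol.h and are assumptions of this example, not defined here):
 *
 *	bool locked;
 *	unsigned long flags;
 *
 *	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 *	if (atomic_inc_and_test(&page->_mapcount))
 *		mem_cgroup_update_page_stat(page, MEMCG_NR_FILE_MAPPED, 1);
 *	mem_cgroup_end_update_page_stat(page, &locked, &flags);
 */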

void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_page_stat_item idx, int val)
{
	struct mem_cgroup *memcg;
	struct page_cgroup *pc = lookup_page_cgroup(page);
	unsigned long uninitialized_var(flags);

	if (mem_cgroup_disabled())
		return;

	memcg = pc->mem_cgroup;
	if (unlikely(!memcg || !PageCgroupUsed(pc)))
		return;

	switch (idx) {
	case MEMCG_NR_FILE_MAPPED:
		idx = MEM_CGROUP_STAT_FILE_MAPPED;
		break;
	default:
		BUG();
	}

	this_cpu_add(memcg->stat->count[idx], val);
}

/*
 * size of first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: maybe necessary to use big numbers in big irons.
 */
#define CHARGE_BATCH	32U
struct memcg_stock_pcp {
	struct mem_cgroup *cached; /* this is never the root cgroup */
	unsigned int nr_pages;
	struct work_struct work;
	unsigned long flags;
#define FLUSHING_CACHED_CHARGE	0
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
static DEFINE_MUTEX(percpu_charge_mutex);

/**
 * consume_stock: Try to consume stocked charge on this cpu.
 * @memcg: memcg to consume from.
 * @nr_pages: how many pages to charge.
 *
 * The charges will only happen if @memcg matches the current cpu's memcg
 * stock, and at least @nr_pages are available in that stock.  Failure to
 * service an allocation will refill the stock.
 *
 * returns true if successful, false otherwise.
 */
static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	bool ret = true;

	if (nr_pages > CHARGE_BATCH)
		return false;

	stock = &get_cpu_var(memcg_stock);
	if (memcg == stock->cached && stock->nr_pages >= nr_pages)
		stock->nr_pages -= nr_pages;
	else /* need to call res_counter_charge */
		ret = false;
	put_cpu_var(memcg_stock);
	return ret;
}

/*
 * Return stocks cached in percpu to res_counter and reset cached information.
 */
static void drain_stock(struct memcg_stock_pcp *stock)
{
	struct mem_cgroup *old = stock->cached;

	if (stock->nr_pages) {
		unsigned long bytes = stock->nr_pages * PAGE_SIZE;

		res_counter_uncharge(&old->res, bytes);
		if (do_swap_account)
			res_counter_uncharge(&old->memsw, bytes);
		stock->nr_pages = 0;
	}
	stock->cached = NULL;
}

/*
 * This must be called with preemption disabled or by a thread which is
 * pinned to the local cpu.
 */
static void drain_local_stock(struct work_struct *dummy)
{
	struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
	drain_stock(stock);
	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
}

static void __init memcg_stock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct memcg_stock_pcp *stock =
					&per_cpu(memcg_stock, cpu);
		INIT_WORK(&stock->work, drain_local_stock);
	}
}

/*
 * Cache charges(val), which are from res_counter, into the local per_cpu
 * area. This will be consumed by the consume_stock() function, later.
 */
static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);

	if (stock->cached != memcg) { /* reset if necessary */
		drain_stock(stock);
		stock->cached = memcg;
	}
	stock->nr_pages += nr_pages;
	put_cpu_var(memcg_stock);
}
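
/*
 * Worked example (illustrative): a single-page fault charges
 * CHARGE_BATCH (32) pages against the res_counter at once; the 31 pages
 * not consumed are parked here via refill_stock(), so the next 31
 * single-page charges in the same memcg on this cpu can be served by
 * consume_stock() without touching the shared counter.
 */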

/*
 * Drains all per-CPU charge caches for the given root_memcg and the subtree
 * of the hierarchy under it. The sync flag says whether we should block
 * until the work is done.
 */
static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
{
	int cpu, curcpu;

	/* Notify other cpus that system-wide "drain" is running */
	get_online_cpus();
	curcpu = get_cpu();
	for_each_online_cpu(cpu) {
		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
		struct mem_cgroup *memcg;

		memcg = stock->cached;
		if (!memcg || !stock->nr_pages)
			continue;
		if (!mem_cgroup_same_or_subtree(root_memcg, memcg))
			continue;
		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
			if (cpu == curcpu)
				drain_local_stock(&stock->work);
			else
				schedule_work_on(cpu, &stock->work);
		}
	}
	put_cpu();

	if (!sync)
		goto out;

	for_each_online_cpu(cpu) {
		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
		if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
			flush_work(&stock->work);
	}
out:
	put_online_cpus();
}

/*
 * Tries to drain stocked charges in other cpus. This function is asynchronous
 * and just puts a work item per cpu for draining locally on each cpu. The
 * caller can expect some charges to be returned to res_counter later, but
 * cannot wait for that to happen.
 */
static void drain_all_stock_async(struct mem_cgroup *root_memcg)
{
	/*
	 * If someone calls draining, avoid adding more kworker runs.
	 */
	if (!mutex_trylock(&percpu_charge_mutex))
		return;
	drain_all_stock(root_memcg, false);
	mutex_unlock(&percpu_charge_mutex);
}

/* This is a synchronous drain interface. */
static void drain_all_stock_sync(struct mem_cgroup *root_memcg)
{
	/* called when force_empty is called */
	mutex_lock(&percpu_charge_mutex);
	drain_all_stock(root_memcg, true);
	mutex_unlock(&percpu_charge_mutex);
}

/*
 * This function drains the percpu counter value from a DEAD cpu and
 * moves it to the local cpu. Note that this function can be preempted.
 */
static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
{
	int i;

	spin_lock(&memcg->pcp_counter_lock);
	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
		long x = per_cpu(memcg->stat->count[i], cpu);

		per_cpu(memcg->stat->count[i], cpu) = 0;
		memcg->nocpu_base.count[i] += x;
	}
	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
		unsigned long x = per_cpu(memcg->stat->events[i], cpu);

		per_cpu(memcg->stat->events[i], cpu) = 0;
		memcg->nocpu_base.events[i] += x;
	}
	spin_unlock(&memcg->pcp_counter_lock);
}

static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
					unsigned long action,
					void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	struct memcg_stock_pcp *stock;
	struct mem_cgroup *iter;

	if (action == CPU_ONLINE)
		return NOTIFY_OK;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	for_each_mem_cgroup(iter)
		mem_cgroup_drain_pcp_counter(iter, cpu);

	stock = &per_cpu(memcg_stock, cpu);
	drain_stock(stock);
	return NOTIFY_OK;
}

/* See __mem_cgroup_try_charge() for details */
enum {
	CHARGE_OK,		/* success */
	CHARGE_RETRY,		/* need to retry but retry is not bad */
	CHARGE_NOMEM,		/* we can't do more. return -ENOMEM */
	CHARGE_WOULDBLOCK,	/* GFP_WAIT wasn't set and not enough res. */
	CHARGE_OOM_DIE,		/* the current is killed because of OOM */
};

static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
				unsigned int nr_pages, unsigned int min_pages,
				bool oom_check)
{
	unsigned long csize = nr_pages * PAGE_SIZE;
	struct mem_cgroup *mem_over_limit;
	struct res_counter *fail_res;
	unsigned long flags = 0;
	int ret;

	ret = res_counter_charge(&memcg->res, csize, &fail_res);

	if (likely(!ret)) {
		if (!do_swap_account)
			return CHARGE_OK;
		ret = res_counter_charge(&memcg->memsw, csize, &fail_res);
		if (likely(!ret))
			return CHARGE_OK;

		res_counter_uncharge(&memcg->res, csize);
		mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
		flags |= MEM_CGROUP_RECLAIM_NOSWAP;
	} else
		mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
	/*
	 * Never reclaim on behalf of optional batching, retry with a
	 * single page instead.
	 */
	if (nr_pages > min_pages)
		return CHARGE_RETRY;

	if (!(gfp_mask & __GFP_WAIT))
		return CHARGE_WOULDBLOCK;

	if (gfp_mask & __GFP_NORETRY)
		return CHARGE_NOMEM;

	ret = mem_cgroup_reclaim(mem_over_limit, gfp_mask, flags);
	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
		return CHARGE_RETRY;
	/*
	 * Even though the limit is exceeded at this point, reclaim
	 * may have been able to free some pages.  Retry the charge
	 * before killing the task.
	 *
	 * Only for regular pages, though: huge pages are rather
	 * unlikely to succeed so close to the limit, and we fall back
	 * to regular pages anyway in case of failure.
	 */
	if (nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER) && ret)
		return CHARGE_RETRY;

	/*
	 * At task move, charge accounts can be doubly counted. So, it's
	 * better to wait until the end of task_move if something is going on.
	 */
	if (mem_cgroup_wait_acct_move(mem_over_limit))
		return CHARGE_RETRY;

	/* If we don't need to call the oom-killer at all, return immediately */
	if (!oom_check)
		return CHARGE_NOMEM;
	/* check OOM */
	if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask, get_order(csize)))
		return CHARGE_OOM_DIE;

	return CHARGE_RETRY;
}

/*
 * __mem_cgroup_try_charge() does
 * 1. detect memcg to be charged against from passed *mm and *ptr,
 * 2. update res_counter
 * 3. call memory reclaim if necessary.
 *
 * In some special cases, if the task is fatal (fatal_signal_pending() or
 * has TIF_MEMDIE), this function returns -EINTR while writing root_mem_cgroup
 * to *ptr. There are two reasons for this. 1: fatal threads should quit as soon
 * as possible without any hazards. 2: all pages should have a valid
 * pc->mem_cgroup. If mm is NULL and the caller doesn't pass a valid memcg
 * pointer, that is treated as a charge to root_mem_cgroup.
 *
 * So __mem_cgroup_try_charge() will return
 *  0       ...  on success, filling *ptr with a valid memcg pointer.
 *  -ENOMEM ...  charge failure because of resource limits.
 *  -EINTR  ...  if the thread is fatal. *ptr is filled with root_mem_cgroup.
 *
 * Unlike the exported interface, an "oom" parameter is added. If oom==true,
 * the oom-killer can be invoked.
 */
static int __mem_cgroup_try_charge(struct mm_struct *mm,
				   gfp_t gfp_mask,
				   unsigned int nr_pages,
				   struct mem_cgroup **ptr,
				   bool oom)
{
	unsigned int batch = max(CHARGE_BATCH, nr_pages);
	int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct mem_cgroup *memcg = NULL;
	int ret;

	/*
	 * Unlike the global VM's OOM-kill, we're not in memory shortage
	 * at the system level. So, allow dying processes to go ahead, in
	 * addition to MEMDIE processes.
	 */
	if (unlikely(test_thread_flag(TIF_MEMDIE)
		     || fatal_signal_pending(current)))
		goto bypass;

	/*
	 * We always charge the cgroup the mm_struct belongs to.
	 * The mm_struct's mem_cgroup changes on task migration if the
	 * thread group leader migrates. It's possible that mm is not
	 * set, if so charge the root memcg (happens for pagecache usage).
	 */
	if (!*ptr && !mm)
		*ptr = root_mem_cgroup;
again:
	if (*ptr) { /* css should be a valid one */
		memcg = *ptr;
		if (mem_cgroup_is_root(memcg))
			goto done;
		if (consume_stock(memcg, nr_pages))
			goto done;
		css_get(&memcg->css);
	} else {
		struct task_struct *p;

		rcu_read_lock();
		p = rcu_dereference(mm->owner);
		/*
		 * Because we don't have task_lock(), "p" can exit.
		 * In that case, "memcg" can point to root or p can be NULL due
		 * to a race with swapoff. Then, we have a small risk of
		 * mis-accounting. But such mis-accounting by a race always
		 * happens because we don't have cgroup_mutex(). It's overkill
		 * and we allow that small race, here.
		 * (*) swapoff et al. will charge against the mm_struct, not
		 * against the task_struct. So, mm->owner can be NULL.
		 */
		memcg = mem_cgroup_from_task(p);
		if (!memcg)
			memcg = root_mem_cgroup;
		if (mem_cgroup_is_root(memcg)) {
			rcu_read_unlock();
			goto done;
		}
		if (consume_stock(memcg, nr_pages)) {
			/*
			 * It seems dangerous to access memcg without css_get().
			 * But considering how consume_stock works, it's not
			 * necessary. If consume_stock succeeds, some charges
			 * from this memcg are cached on this cpu. So, we
			 * don't need to call css_get()/css_tryget() before
			 * calling consume_stock().
			 */
			rcu_read_unlock();
			goto done;
		}
		/* after here, we may be blocked. we need to get refcnt */
		if (!css_tryget(&memcg->css)) {
			rcu_read_unlock();
			goto again;
		}
		rcu_read_unlock();
	}

	do {
		bool oom_check;

		/* If killed, bypass charge */
		if (fatal_signal_pending(current)) {
			css_put(&memcg->css);
			goto bypass;
		}

		oom_check = false;
		if (oom && !nr_oom_retries) {
			oom_check = true;
			nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
		}

		ret = mem_cgroup_do_charge(memcg, gfp_mask, batch, nr_pages,
		    oom_check);
		switch (ret) {
		case CHARGE_OK:
			break;
		case CHARGE_RETRY: /* not in OOM situation but retry */
			batch = nr_pages;
			css_put(&memcg->css);
			memcg = NULL;
			goto again;
		case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
			css_put(&memcg->css);
			goto nomem;
		case CHARGE_NOMEM: /* OOM routine works */
			if (!oom) {
				css_put(&memcg->css);
				goto nomem;
			}
			/* If oom, we never return -ENOMEM */
			nr_oom_retries--;
			break;
		case CHARGE_OOM_DIE: /* Killed by OOM Killer */
			css_put(&memcg->css);
			goto bypass;
		}
	} while (ret != CHARGE_OK);

	if (batch > nr_pages)
		refill_stock(memcg, batch - nr_pages);
	css_put(&memcg->css);
done:
	*ptr = memcg;
	return 0;
nomem:
	*ptr = NULL;
	return -ENOMEM;
bypass:
	*ptr = root_mem_cgroup;
	return -EINTR;
}
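
/*
 * Sketch of the expected caller pattern (illustrative, condensed from
 * the charge paths later in this file):
 *
 *	struct mem_cgroup *memcg = NULL;
 *	int ret;
 *
 *	ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
 *	if (ret == -ENOMEM)
 *		return ret;
 *	__mem_cgroup_commit_charge(memcg, page, nr_pages, ctype, false);
 *
 * On 0 as well as -EINTR, memcg points at a valid group (possibly
 * root_mem_cgroup), so the commit step is safe in both cases.
 */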

/*
 * Sometimes we have to undo a charge we got by try_charge().
 * This function is for that and does uncharge, putting the css's refcnt
 * gotten by try_charge().
 */
static void __mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
				       unsigned int nr_pages)
{
	if (!mem_cgroup_is_root(memcg)) {
		unsigned long bytes = nr_pages * PAGE_SIZE;

		res_counter_uncharge(&memcg->res, bytes);
		if (do_swap_account)
			res_counter_uncharge(&memcg->memsw, bytes);
	}
}

/*
 * Cancel charges in this cgroup. This doesn't propagate to the parent cgroup.
 * This is useful when moving usage to the parent cgroup.
 */
static void __mem_cgroup_cancel_local_charge(struct mem_cgroup *memcg,
					unsigned int nr_pages)
{
	unsigned long bytes = nr_pages * PAGE_SIZE;

	if (mem_cgroup_is_root(memcg))
		return;

	res_counter_uncharge_until(&memcg->res, memcg->res.parent, bytes);
	if (do_swap_account)
		res_counter_uncharge_until(&memcg->memsw,
						memcg->memsw.parent, bytes);
}

/*
 * A helper function to get mem_cgroup from ID. Must be called under
 * rcu_read_lock().  The caller is responsible for calling css_tryget if
 * the mem_cgroup is used for charging. (dropping refcnt from swap can be
 * called against removed memcg.)
 */
static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
{
	struct cgroup_subsys_state *css;

	/* ID 0 is unused ID */
	if (!id)
		return NULL;
	css = css_lookup(&mem_cgroup_subsys, id);
	if (!css)
		return NULL;
	return mem_cgroup_from_css(css);
}

struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	struct mem_cgroup *memcg = NULL;
	struct page_cgroup *pc;
	unsigned short id;
	swp_entry_t ent;

	VM_BUG_ON(!PageLocked(page));

	pc = lookup_page_cgroup(page);
	lock_page_cgroup(pc);
	if (PageCgroupUsed(pc)) {
		memcg = pc->mem_cgroup;
		if (memcg && !css_tryget(&memcg->css))
			memcg = NULL;
	} else if (PageSwapCache(page)) {
		ent.val = page_private(page);
		id = lookup_swap_cgroup_id(ent);
		rcu_read_lock();
		memcg = mem_cgroup_lookup(id);
		if (memcg && !css_tryget(&memcg->css))
			memcg = NULL;
		rcu_read_unlock();
	}
	unlock_page_cgroup(pc);
	return memcg;
}

static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
				       struct page *page,
				       unsigned int nr_pages,
				       enum charge_type ctype,
				       bool lrucare)
{
	struct page_cgroup *pc = lookup_page_cgroup(page);
	struct zone *uninitialized_var(zone);
	struct lruvec *lruvec;
	bool was_on_lru = false;
	bool anon;

	lock_page_cgroup(pc);
	VM_BUG_ON(PageCgroupUsed(pc));
	/*
	 * we don't need page_cgroup_lock about tail pages, because they are not
	 * accessed by any other context at this point.
	 */

	/*
	 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
	 * may already be on some other mem_cgroup's LRU.  Take care of it.
	 */
	if (lrucare) {
		zone = page_zone(page);
		spin_lock_irq(&zone->lru_lock);
		if (PageLRU(page)) {
			lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
			ClearPageLRU(page);
			del_page_from_lru_list(page, lruvec, page_lru(page));
			was_on_lru = true;
		}
	}

	pc->mem_cgroup = memcg;
	/*
	 * We access a page_cgroup asynchronously without lock_page_cgroup().
	 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
	 * is accessed after testing USED bit. To make pc->mem_cgroup visible
	 * before USED bit, we need a memory barrier here.
	 * See mem_cgroup_add_lru_list(), etc.
	 */
	smp_wmb();
	SetPageCgroupUsed(pc);

	if (lrucare) {
		if (was_on_lru) {
			lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
			VM_BUG_ON(PageLRU(page));
			SetPageLRU(page);
			add_page_to_lru_list(page, lruvec, page_lru(page));
		}
		spin_unlock_irq(&zone->lru_lock);
	}

	if (ctype == MEM_CGROUP_CHARGE_TYPE_ANON)
		anon = true;
	else
		anon = false;

	mem_cgroup_charge_statistics(memcg, page, anon, nr_pages);
	unlock_page_cgroup(pc);

	/*
	 * "charge_statistics" updated event counter.
	 */
	memcg_check_events(memcg, page);
}
static DEFINE_MUTEX(set_limit_mutex);

#ifdef CONFIG_MEMCG_KMEM
static inline bool memcg_can_account_kmem(struct mem_cgroup *memcg)
{
	return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg) &&
		(memcg->kmem_account_flags & KMEM_ACCOUNTED_MASK);
}

/*
 * This is a bit cumbersome, but it is rarely used and avoids a backpointer
 * in the memcg_cache_params struct.
 */
static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
{
	struct kmem_cache *cachep;

	VM_BUG_ON(p->is_root_cache);
	cachep = p->root_cache;
	return cachep->memcg_params->memcg_caches[memcg_cache_id(p->memcg)];
}

#ifdef CONFIG_SLABINFO
static int mem_cgroup_slabinfo_read(struct cgroup_subsys_state *css,
				    struct cftype *cft, struct seq_file *m)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct memcg_cache_params *params;

	if (!memcg_can_account_kmem(memcg))
		return -EIO;

	print_slabinfo_header(m);

	mutex_lock(&memcg->slab_caches_mutex);
	list_for_each_entry(params, &memcg->memcg_slab_caches, list)
		cache_show(memcg_params_to_cache(params), m);
	mutex_unlock(&memcg->slab_caches_mutex);

	return 0;
}
#endif

static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
{
	struct res_counter *fail_res;
	struct mem_cgroup *_memcg;
	int ret = 0;
	bool may_oom;

	ret = res_counter_charge(&memcg->kmem, size, &fail_res);
	if (ret)
		return ret;

	/*
	 * Conditions under which we can wait for the oom_killer. Those are
	 * the same conditions tested by the core page allocator
	 */
	may_oom = (gfp & __GFP_FS) && !(gfp & __GFP_NORETRY);

	_memcg = memcg;
	ret = __mem_cgroup_try_charge(NULL, gfp, size >> PAGE_SHIFT,
				      &_memcg, may_oom);

	if (ret == -EINTR)  {
		/*
		 * __mem_cgroup_try_charge() chose to bypass to root due to
		 * OOM kill or fatal signal.  Since our only options are to
		 * either fail the allocation or charge it to this cgroup, do
		 * it as a temporary condition. But we can't fail. From a
		 * kmem/slab perspective, the cache has already been selected,
		 * by mem_cgroup_kmem_get_cache(), so it is too late to change
		 * our minds.
		 *
		 * This condition will only trigger if the task entered
		 * memcg_charge_kmem in a sane state, but was OOM-killed during
		 * __mem_cgroup_try_charge() above. Tasks that were already
		 * dying when the allocation triggers should have been already
		 * directed to the root cgroup in memcontrol.h
		 */
		res_counter_charge_nofail(&memcg->res, size, &fail_res);
		if (do_swap_account)
			res_counter_charge_nofail(&memcg->memsw, size,
						  &fail_res);
		ret = 0;
	} else if (ret)
		res_counter_uncharge(&memcg->kmem, size);

	return ret;
}

static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size)
{
	res_counter_uncharge(&memcg->res, size);
	if (do_swap_account)
		res_counter_uncharge(&memcg->memsw, size);

	/* Not down to 0 */
	if (res_counter_uncharge(&memcg->kmem, size))
		return;

	/*
	 * Releases a reference taken in kmem_cgroup_css_offline in case
	 * this last uncharge is racing with the offlining code or it is
	 * outliving the memcg existence.
	 *
	 * The memory barrier imposed by test&clear is paired with the
	 * explicit one in memcg_kmem_mark_dead().
	 */
	if (memcg_kmem_test_and_clear_dead(memcg))
		css_put(&memcg->css);
}

void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep)
{
	if (!memcg)
		return;

	mutex_lock(&memcg->slab_caches_mutex);
	list_add(&cachep->memcg_params->list, &memcg->memcg_slab_caches);
	mutex_unlock(&memcg->slab_caches_mutex);
}

/*
 * helper for accessing a memcg's index. It will be used as an index in the
 * child cache array in kmem_cache, and also to derive its name. This function
 * will return -1 when this is not a kmem-limited memcg.
 */
int memcg_cache_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}

/*
 * This ends up being protected by the set_limit mutex, during normal
 * operation, because that is its main call site.
 *
 * But when we create a new cache, we can call this as well if its parent
 * is kmem-limited. That will have to hold set_limit_mutex as well.
 */
int memcg_update_cache_sizes(struct mem_cgroup *memcg)
{
	int num, ret;

	num = ida_simple_get(&kmem_limited_groups,
				0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
	if (num < 0)
		return num;
	/*
	 * After this point, kmem_accounted (that we test atomically in
	 * the beginning of this conditional), is no longer 0. This
	 * guarantees only one process will set the following boolean
	 * to true. We don't need test_and_set because we're protected
	 * by the set_limit_mutex anyway.
	 */
	memcg_kmem_set_activated(memcg);

	ret = memcg_update_all_caches(num+1);
	if (ret) {
		ida_simple_remove(&kmem_limited_groups, num);
		memcg_kmem_clear_activated(memcg);
		return ret;
	}

	memcg->kmemcg_id = num;
	INIT_LIST_HEAD(&memcg->memcg_slab_caches);
	mutex_init(&memcg->slab_caches_mutex);
	return 0;
}

static size_t memcg_caches_array_size(int num_groups)
{
	ssize_t size;
	if (num_groups <= 0)
		return 0;

	size = 2 * num_groups;
	if (size < MEMCG_CACHES_MIN_SIZE)
		size = MEMCG_CACHES_MIN_SIZE;
	else if (size > MEMCG_CACHES_MAX_SIZE)
		size = MEMCG_CACHES_MAX_SIZE;

	return size;
}
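
/*
 * Worked example (illustrative; MEMCG_CACHES_MIN_SIZE and
 * MEMCG_CACHES_MAX_SIZE are defined elsewhere): the array is sized at
 * twice the number of kmem-limited groups, so num_groups = 3 yields
 * size 6, num_groups = 1 is rounded up to the minimum, and very large
 * group counts are clamped to the maximum so that the per-cache
 * footprint stays bounded.
 */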

/*
 * We should update the current array size iff all cache updates succeed. This
 * can only be done from the slab side. The slab mutex needs to be held when
 * calling this.
 */
void memcg_update_array_size(int num)
{
	if (num > memcg_limited_groups_array_size)
		memcg_limited_groups_array_size = memcg_caches_array_size(num);
}

static void kmem_cache_destroy_work_func(struct work_struct *w);

int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
{
	struct memcg_cache_params *cur_params = s->memcg_params;

	VM_BUG_ON(s->memcg_params && !s->memcg_params->is_root_cache);

	if (num_groups > memcg_limited_groups_array_size) {
		int i;
		ssize_t size = memcg_caches_array_size(num_groups);

		size *= sizeof(void *);
		size += offsetof(struct memcg_cache_params, memcg_caches);

		s->memcg_params = kzalloc(size, GFP_KERNEL);
		if (!s->memcg_params) {
			s->memcg_params = cur_params;
			return -ENOMEM;
		}

		s->memcg_params->is_root_cache = true;

		/*
		 * There is the chance it will be bigger than
		 * memcg_limited_groups_array_size, if we failed an allocation
		 * in a cache, in which case all caches updated before it, will
		 * have a bigger array.
		 *
		 * But if that is the case, the data after
		 * memcg_limited_groups_array_size is certainly unused
		 */
		for (i = 0; i < memcg_limited_groups_array_size; i++) {
			if (!cur_params->memcg_caches[i])
				continue;
			s->memcg_params->memcg_caches[i] =
						cur_params->memcg_caches[i];
		}

		/*
		 * Ideally, we would wait until all caches succeed, and only
		 * then free the old one. But this is not worth the extra
		 * pointer per-cache we'd have to have for this.
		 *
		 * It is not a big deal if some caches are left with a size
		 * bigger than the others. And all updates will reset this
		 * anyway.
		 */
		kfree(cur_params);
	}
	return 0;
}

int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
			 struct kmem_cache *root_cache)
{
	size_t size;

	if (!memcg_kmem_enabled())
		return 0;

	if (!memcg) {
		size = offsetof(struct memcg_cache_params, memcg_caches);
		size += memcg_limited_groups_array_size * sizeof(void *);
	} else
		size = sizeof(struct memcg_cache_params);

	s->memcg_params = kzalloc(size, GFP_KERNEL);
	if (!s->memcg_params)
		return -ENOMEM;

	if (memcg) {
		s->memcg_params->memcg = memcg;
		s->memcg_params->root_cache = root_cache;
		INIT_WORK(&s->memcg_params->destroy,
				kmem_cache_destroy_work_func);
	} else
		s->memcg_params->is_root_cache = true;

	return 0;
}

void memcg_release_cache(struct kmem_cache *s)
{
	struct kmem_cache *root;
	struct mem_cgroup *memcg;
	int id;

	/*
	 * This happens, for instance, when a root cache goes away before we
	 * add any memcg.
	 */
	if (!s->memcg_params)
		return;

	if (s->memcg_params->is_root_cache)
		goto out;

	memcg = s->memcg_params->memcg;
	id  = memcg_cache_id(memcg);

	root = s->memcg_params->root_cache;
	root->memcg_params->memcg_caches[id] = NULL;

	mutex_lock(&memcg->slab_caches_mutex);
	list_del(&s->memcg_params->list);
	mutex_unlock(&memcg->slab_caches_mutex);

	css_put(&memcg->css);
out:
	kfree(s->memcg_params);
}

/*
 * During the creation of a new cache, we need to disable our accounting
 * mechanism altogether. This is true even if we are not creating, but rather
 * just enqueuing new caches to be created.
 *
 * This is because that process will trigger allocations; some visible, like
 * explicit kmallocs to auxiliary data structures, name strings and internal
 * cache structures; some well concealed, like INIT_WORK() that can allocate
 * objects during debug.
 *
 * If any allocation happens during memcg_kmem_get_cache, we will recurse back
 * to it. This may not be a bounded recursion: since the first cache creation
 * failed to complete (waiting on the allocation), we'll just try to create the
 * cache again, failing at the same point.
 *
 * memcg_kmem_get_cache is prepared to abort after seeing a positive count of
 * memcg_kmem_skip_account. So we enclose anything that might allocate memory
 * inside the following two functions.
 */
static inline void memcg_stop_kmem_account(void)
{
	VM_BUG_ON(!current->mm);
	current->memcg_kmem_skip_account++;
}

static inline void memcg_resume_kmem_account(void)
{
	VM_BUG_ON(!current->mm);
	current->memcg_kmem_skip_account--;
}

static void kmem_cache_destroy_work_func(struct work_struct *w)
{
	struct kmem_cache *cachep;
	struct memcg_cache_params *p;

	p = container_of(w, struct memcg_cache_params, destroy);

	cachep = memcg_params_to_cache(p);

	/*
	 * If we get down to 0 after shrink, we could delete right away.
	 * However, memcg_release_pages() already puts us back in the workqueue
	 * in that case. If we proceed deleting, we'll get a dangling
	 * reference, and removing the object from the workqueue in that case
	 * is unnecessary complication. We are not a fast path.
	 *
	 * Note that this case is fundamentally different from racing with
	 * shrink_slab(): if memcg_cgroup_destroy_cache() is called in
	 * kmem_cache_shrink, not only we would be reinserting a dead cache
	 * into the queue, but doing so from inside the worker racing to
	 * destroy it.
	 *
	 * So if we aren't down to zero, we'll just schedule a worker and try
	 * again
	 */
	if (atomic_read(&cachep->memcg_params->nr_pages) != 0) {
		kmem_cache_shrink(cachep);
		if (atomic_read(&cachep->memcg_params->nr_pages) == 0)
			return;
	} else
		kmem_cache_destroy(cachep);
}

void mem_cgroup_destroy_cache(struct kmem_cache *cachep)
{
	if (!cachep->memcg_params->dead)
		return;

	/*
	 * There are many ways in which we can get here.
	 *
	 * We can get to a memory-pressure situation while the delayed work is
	 * still pending to run. The vmscan shrinkers can then release all
	 * cache memory and get us to destruction. If this is the case, we'll
	 * be executed twice, which is a bug (the second time will execute over
	 * bogus data). In this case, cancelling the work should be fine.
	 *
	 * But we can also get here from the worker itself, if
	 * kmem_cache_shrink is enough to shake all the remaining objects and
	 * get the page count to 0. In this case, we'll deadlock if we try to
	 * cancel the work (the worker runs with an internal lock held, which
	 * is the same lock we would hold for cancel_work_sync().)
	 *
	 * Since we can't possibly know who got us here, just refrain from
	 * running if there is already work pending
	 */
	if (work_pending(&cachep->memcg_params->destroy))
		return;
	/*
	 * We have to defer the actual destroying to a workqueue, because
	 * we might currently be in a context that cannot sleep.
	 */
	schedule_work(&cachep->memcg_params->destroy);
}

/*
 * This lock protects updaters, not readers. We want readers to be as fast as
 * they can, and they will either see NULL or a valid cache value. Our model
 * allows them to see NULL, in which case the root memcg will be selected.
 *
 * We need this lock because multiple allocations to the same cache can span
 * more than one worker. Only one of them can create the cache.
 */
static DEFINE_MUTEX(memcg_cache_mutex);

/*
 * Called with memcg_cache_mutex held
 */
static struct kmem_cache *kmem_cache_dup(struct mem_cgroup *memcg,
					 struct kmem_cache *s)
{
	struct kmem_cache *new;
	static char *tmp_name = NULL;

	lockdep_assert_held(&memcg_cache_mutex);

	/*
	 * kmem_cache_create_memcg duplicates the given name, and
	 * cgroup_name() for this name requires RCU context.
	 * This static temporary buffer is used to prevent pointless
	 * short-lived allocations.
	 */
	if (!tmp_name) {
		tmp_name = kmalloc(PATH_MAX, GFP_KERNEL);
		if (!tmp_name)
			return NULL;
	}

	rcu_read_lock();
	snprintf(tmp_name, PATH_MAX, "%s(%d:%s)", s->name,
			 memcg_cache_id(memcg), cgroup_name(memcg->css.cgroup));
	rcu_read_unlock();

	new = kmem_cache_create_memcg(memcg, tmp_name, s->object_size, s->align,
				      (s->flags & ~SLAB_PANIC), s->ctor, s);

	if (new)
		new->allocflags |= __GFP_KMEMCG;

	return new;
}
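
/*
 * Example of the resulting cache name (illustrative): duplicating a root
 * cache "dentry" for css id 2 of a cgroup named "newgroup" makes the
 * format string above produce "dentry(2:newgroup)".
 */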

static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
						  struct kmem_cache *cachep)
{
	struct kmem_cache *new_cachep;
	int idx;

	BUG_ON(!memcg_can_account_kmem(memcg));

	idx = memcg_cache_id(memcg);

	mutex_lock(&memcg_cache_mutex);
	new_cachep = cachep->memcg_params->memcg_caches[idx];
	if (new_cachep) {
		css_put(&memcg->css);
		goto out;
	}

	new_cachep = kmem_cache_dup(memcg, cachep);
	if (new_cachep == NULL) {
		new_cachep = cachep;
		css_put(&memcg->css);
		goto out;
	}

	atomic_set(&new_cachep->memcg_params->nr_pages, 0);

	cachep->memcg_params->memcg_caches[idx] = new_cachep;
	/*
	 * the readers won't lock, make sure everybody sees the updated value,
	 * so they won't put stuff in the queue again for no reason
	 */
	wmb();
out:
	mutex_unlock(&memcg_cache_mutex);
	return new_cachep;
}

void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
{
	struct kmem_cache *c;
	int i;

	if (!s->memcg_params)
		return;
	if (!s->memcg_params->is_root_cache)
		return;

	/*
	 * If the cache is being destroyed, we trust that there is no one else
	 * requesting objects from it. Even if there are, the sanity checks in
	 * kmem_cache_destroy should catch this ill case.
	 *
	 * Still, we don't want anyone else freeing memcg_caches under our
	 * noses, which can happen if a new memcg comes to life. As usual,
	 * we'll take the set_limit_mutex to protect ourselves against this.
	 */
	mutex_lock(&set_limit_mutex);
	for (i = 0; i < memcg_limited_groups_array_size; i++) {
		c = s->memcg_params->memcg_caches[i];
		if (!c)
			continue;

		/*
		 * We will now manually delete the caches, so to avoid races
		 * we need to cancel all pending destruction workers and
		 * proceed with destruction ourselves.
		 *
		 * kmem_cache_destroy() will call kmem_cache_shrink internally,
		 * and that could spawn the workers again: it is likely that
		 * the cache still has active pages until this very moment.
		 * This would lead us back to mem_cgroup_destroy_cache.
		 *
		 * But that will not execute at all if the "dead" flag is not
		 * set, so flip it down to guarantee we are in control.
		 */
		c->memcg_params->dead = false;
		cancel_work_sync(&c->memcg_params->destroy);
		kmem_cache_destroy(c);
	}
	mutex_unlock(&set_limit_mutex);
}

struct create_work {
	struct mem_cgroup *memcg;
	struct kmem_cache *cachep;
	struct work_struct work;
};

static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
{
	struct kmem_cache *cachep;
	struct memcg_cache_params *params;

	if (!memcg_kmem_is_active(memcg))
		return;

	mutex_lock(&memcg->slab_caches_mutex);
	list_for_each_entry(params, &memcg->memcg_slab_caches, list) {
		cachep = memcg_params_to_cache(params);
		cachep->memcg_params->dead = true;
		schedule_work(&cachep->memcg_params->destroy);
	}
	mutex_unlock(&memcg->slab_caches_mutex);
}

static void memcg_create_cache_work_func(struct work_struct *w)
{
	struct create_work *cw;

	cw = container_of(w, struct create_work, work);
	memcg_create_kmem_cache(cw->memcg, cw->cachep);
	kfree(cw);
}

/*
 * Enqueue the creation of a per-memcg kmem_cache.
 */
static void __memcg_create_cache_enqueue(struct mem_cgroup *memcg,
					 struct kmem_cache *cachep)
{
	struct create_work *cw;

	cw = kmalloc(sizeof(struct create_work), GFP_NOWAIT);
	if (cw == NULL) {
		css_put(&memcg->css);
		return;
	}

	cw->memcg = memcg;
	cw->cachep = cachep;

	INIT_WORK(&cw->work, memcg_create_cache_work_func);
	schedule_work(&cw->work);
}

static void memcg_create_cache_enqueue(struct mem_cgroup *memcg,
				       struct kmem_cache *cachep)
{
	/*
	 * We need to stop accounting when we kmalloc, because if the
	 * corresponding kmalloc cache is not yet created, the first allocation
	 * in __memcg_create_cache_enqueue will recurse.
	 *
	 * However, it is better to enclose the whole function. Depending on
	 * the debugging options enabled, INIT_WORK(), for instance, can
	 * trigger an allocation. This too, will make us recurse. Because at
	 * this point we can't allow ourselves back into memcg_kmem_get_cache,
	 * the safest choice is to do it like this, wrapping the whole function.
	 */
	memcg_stop_kmem_account();
	__memcg_create_cache_enqueue(memcg, cachep);
	memcg_resume_kmem_account();
}
/*
 * Return the kmem_cache we're supposed to use for a slab allocation.
 * We try to use the current memcg's version of the cache.
 *
 * If the cache does not exist yet, if we are the first user of it,
 * we either create it immediately, if possible, or create it asynchronously
 * in a workqueue.
 * In the latter case, we will let the current allocation go through with
 * the original cache.
 *
 * Can't be called in interrupt context or from kernel threads.
 * This function needs to be called with rcu_read_lock() held.
 */
struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
					  gfp_t gfp)
{
	struct mem_cgroup *memcg;
	int idx;

	VM_BUG_ON(!cachep->memcg_params);
	VM_BUG_ON(!cachep->memcg_params->is_root_cache);

	if (!current->mm || current->memcg_kmem_skip_account)
		return cachep;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(current->mm->owner));

	if (!memcg_can_account_kmem(memcg))
		goto out;

	idx = memcg_cache_id(memcg);

	/*
	 * barrier to make sure we're always seeing the up to date value.  The
	 * code updating memcg_caches will issue a write barrier to match this.
	 */
	read_barrier_depends();
	if (likely(cachep->memcg_params->memcg_caches[idx])) {
		cachep = cachep->memcg_params->memcg_caches[idx];
		goto out;
	}

	/* The corresponding put will be done in the workqueue. */
	if (!css_tryget(&memcg->css))
		goto out;
	rcu_read_unlock();

	/*
	 * If we are in a safe context (can wait, and not in interrupt
	 * context), we could be predictable and return right away.
	 * This would guarantee that the allocation being performed
	 * already belongs in the new cache.
	 *
	 * However, there are some clashes that can arise from locking.
	 * For instance, because we acquire the slab_mutex while doing
	 * kmem_cache_dup, this means no further allocation could happen
	 * with the slab_mutex held.
	 *
	 * Also, because cache creation issues get_online_cpus(), this
	 * creates a lock chain: memcg_slab_mutex -> cpu_hotplug_mutex,
	 * that ends up reversed during cpu hotplug. (cpuset allocates
	 * a bunch of GFP_KERNEL memory during cpuup). Due to all that,
	 * it is better to defer everything.
	 */
	memcg_create_cache_enqueue(memcg, cachep);
	return cachep;
out:
	rcu_read_unlock();
	return cachep;
}
EXPORT_SYMBOL(__memcg_kmem_get_cache);

/*
 * We need to verify if the allocation against current->mm->owner's memcg is
 * possible for the given order. But the page is not allocated yet, so we'll
 * need a further commit step to do the final arrangements.
 *
 * It is possible for the task to switch cgroups in the meantime, so at
 * commit time, we can't rely on task conversion any longer.  We'll then use
 * the handle argument to return to the caller which cgroup we should commit
 * against. We could also return the memcg directly and avoid the pointer
 * passing, but a boolean return value gives better semantics considering
 * the compiled-out case as well.
 *
 * Returning true means the allocation is possible.
 */
bool
__memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order)
{
	struct mem_cgroup *memcg;
	int ret;

	*_memcg = NULL;

	/*
	 * Disabling accounting is only relevant for some specific memcg
	 * internal allocations. Therefore we would initially not have such
	 * check here, since direct calls to the page allocator that are marked
	 * with GFP_KMEMCG only happen outside memcg core. We are mostly
	 * concerned with cache allocations, and by having this test at
	 * memcg_kmem_get_cache, we are already able to relay the allocation to
	 * the root cache and bypass the memcg cache altogether.
	 *
	 * There is one exception, though: the SLUB allocator does not create
	 * large order caches, but rather service large kmallocs directly from
	 * the page allocator. Therefore, the following sequence when backed by
	 * the SLUB allocator:
	 *
	 * 	memcg_stop_kmem_account();
	 * 	kmalloc(<large_number>)
	 * 	memcg_resume_kmem_account();
	 *
	 * would effectively ignore the fact that we should skip accounting,
	 * since it will drive us directly to this function without passing
	 * through the cache selector memcg_kmem_get_cache. Such large
	 * allocations are extremely rare but can happen, for instance, for the
	 * cache arrays. We bring this test here.
	 */
	if (!current->mm || current->memcg_kmem_skip_account)
		return true;

	memcg = try_get_mem_cgroup_from_mm(current->mm);

	/*
	 * very rare case described in mem_cgroup_from_task. Unfortunately there
	 * isn't much we can do without complicating this too much, and it would
	 * be gfp-dependent anyway. Just let it go
	 */
	if (unlikely(!memcg))
		return true;

	if (!memcg_can_account_kmem(memcg)) {
		css_put(&memcg->css);
		return true;
	}

	ret = memcg_charge_kmem(memcg, gfp, PAGE_SIZE << order);
	if (!ret)
		*_memcg = memcg;

	css_put(&memcg->css);
	return (ret == 0);
}

void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg,
			      int order)
{
	struct page_cgroup *pc;

	VM_BUG_ON(mem_cgroup_is_root(memcg));

	/* The page allocation failed. Revert */
	if (!page) {
		memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
		return;
	}

	pc = lookup_page_cgroup(page);
	lock_page_cgroup(pc);
	pc->mem_cgroup = memcg;
	SetPageCgroupUsed(pc);
	unlock_page_cgroup(pc);
}

void __memcg_kmem_uncharge_pages(struct page *page, int order)
{
	struct mem_cgroup *memcg = NULL;
	struct page_cgroup *pc;

	pc = lookup_page_cgroup(page);
	/*
	 * Fast unlocked return. Theoretically might have changed, have to
	 * check again after locking.
	 */
	if (!PageCgroupUsed(pc))
		return;

	lock_page_cgroup(pc);
	if (PageCgroupUsed(pc)) {
		memcg = pc->mem_cgroup;
		ClearPageCgroupUsed(pc);
	}
	unlock_page_cgroup(pc);

	/*
	 * We trust that the page is a valid kmem allocation only if there is
	 * a memcg associated with it.
	 */
	if (!memcg)
		return;

	VM_BUG_ON(mem_cgroup_is_root(memcg));
	memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
}
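
/*
 * Illustrative sketch (hypothetical caller, not part of this file): how the
 * three hooks above are meant to be paired around an actual page allocation.
 * The kernel's equivalent logic lives in the memcg_kmem_*_charge() helpers:
 *
 *	struct mem_cgroup *memcg = NULL;
 *	struct page *page;
 *
 *	if (!__memcg_kmem_newpage_charge(gfp, &memcg, order))
 *		return NULL;	/- over limit: fail the allocation -/
 *	page = alloc_pages(gfp, order);
 *	if (memcg)		/- commit on success, revert on failure -/
 *		__memcg_kmem_commit_charge(page, memcg, order);
 *	...
 *	__memcg_kmem_uncharge_pages(page, order);	/- when freeing -/
 */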
#else
static inline void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
{
}
#endif /* CONFIG_MEMCG_KMEM */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define PCGF_NOCOPY_AT_SPLIT (1 << PCG_LOCK | 1 << PCG_MIGRATION)
/*
 * Because tail pages are not marked as "used", set it. We're under
 * zone->lru_lock, 'splitting on pmd' and compound_lock.
 * charge/uncharge will never happen and move_account() is done under
 * compound_lock(), so we don't have to take care of races.
 */
void mem_cgroup_split_huge_fixup(struct page *head)
{
	struct page_cgroup *head_pc = lookup_page_cgroup(head);
	struct page_cgroup *pc;
	struct mem_cgroup *memcg;
	int i;

	if (mem_cgroup_disabled())
		return;

	memcg = head_pc->mem_cgroup;
	for (i = 1; i < HPAGE_PMD_NR; i++) {
		pc = head_pc + i;
		pc->mem_cgroup = memcg;
		smp_wmb();/* see __commit_charge() */
		pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
	}
	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
		       HPAGE_PMD_NR);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/**
 * mem_cgroup_move_account - move account of the page
 * @page: the page
 * @nr_pages: number of regular pages (>1 for huge pages)
 * @pc:	page_cgroup of the page.
 * @from: mem_cgroup which the page is moved from.
 * @to:	mem_cgroup which the page is moved to. @from != @to.
 *
 * The caller must confirm the following.
 * - page is not on LRU (isolate_page() is useful.)
 * - compound_lock is held when nr_pages > 1
 *
 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
 * from old cgroup.
 */
static int mem_cgroup_move_account(struct page *page,
				   unsigned int nr_pages,
				   struct page_cgroup *pc,
				   struct mem_cgroup *from,
				   struct mem_cgroup *to)
{
	unsigned long flags;
	int ret;
	bool anon = PageAnon(page);

	VM_BUG_ON(from == to);
	VM_BUG_ON(PageLRU(page));
	/*
	 * The page is isolated from LRU. So, collapse function
	 * will not handle this page. But page splitting can happen.
	 * Do this check under compound_page_lock(). The caller should
	 * hold it.
	 */
	ret = -EBUSY;
	if (nr_pages > 1 && !PageTransHuge(page))
		goto out;

	lock_page_cgroup(pc);

	ret = -EINVAL;
	if (!PageCgroupUsed(pc) || pc->mem_cgroup != from)
		goto unlock;

	move_lock_mem_cgroup(from, &flags);

	if (!anon && page_mapped(page)) {
		/* Update mapped_file data for mem_cgroup */
		preempt_disable();
		__this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
		__this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
		preempt_enable();
	}
	mem_cgroup_charge_statistics(from, page, anon, -nr_pages);

	/* caller should have done css_get */
	pc->mem_cgroup = to;
	mem_cgroup_charge_statistics(to, page, anon, nr_pages);
	move_unlock_mem_cgroup(from, &flags);
	ret = 0;
unlock:
	unlock_page_cgroup(pc);
	/*
	 * check events
	 */
	memcg_check_events(to, page);
	memcg_check_events(from, page);
out:
	return ret;
}

/**
 * mem_cgroup_move_parent - moves page to the parent group
 * @page: the page to move
 * @pc: page_cgroup of the page
 * @child: page's cgroup
 *
 * move charges to its parent or the root cgroup if the group has no
 * parent (aka use_hierarchy==0).
 * Although this might fail (get_page_unless_zero, isolate_lru_page or
 * mem_cgroup_move_account fails) the failure is always temporary and
 * it signals a race with a page removal/uncharge or migration. In the
 * first case the page is on the way out and it will vanish from the LRU
 * on the next attempt and the call should be retried later.
 * Isolation from the LRU fails only if the page has been isolated from
 * the LRU since we looked at it and that usually means either global
 * reclaim or migration going on. The page will either get back to the
 * LRU or vanish.
 * Finally, mem_cgroup_move_account fails only if the page got uncharged
 * (!PageCgroupUsed) or moved to a different group. The page will
 * disappear in the next attempt.
 */
static int mem_cgroup_move_parent(struct page *page,
				  struct page_cgroup *pc,
				  struct mem_cgroup *child)
{
	struct mem_cgroup *parent;
	unsigned int nr_pages;
	unsigned long uninitialized_var(flags);
	int ret;

	VM_BUG_ON(mem_cgroup_is_root(child));

	ret = -EBUSY;
	if (!get_page_unless_zero(page))
		goto out;
	if (isolate_lru_page(page))
		goto put;

	nr_pages = hpage_nr_pages(page);

	parent = parent_mem_cgroup(child);
	/*
	 * If no parent, move charges to root cgroup.
	 */
	if (!parent)
		parent = root_mem_cgroup;

	if (nr_pages > 1) {
		VM_BUG_ON(!PageTransHuge(page));
		flags = compound_lock_irqsave(page);
	}

	ret = mem_cgroup_move_account(page, nr_pages,
				pc, child, parent);
	if (!ret)
		__mem_cgroup_cancel_local_charge(child, nr_pages);

	if (nr_pages > 1)
		compound_unlock_irqrestore(page, flags);
	putback_lru_page(page);
put:
	put_page(page);
out:
	return ret;
}

/*
 * Charge the memory controller for page usage.
 * Return
 * 0 if the charge was successful
 * < 0 if the cgroup is over its limit
 */
static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask, enum charge_type ctype)
{
	struct mem_cgroup *memcg = NULL;
	unsigned int nr_pages = 1;
	bool oom = true;
	int ret;

	if (PageTransHuge(page)) {
		nr_pages <<= compound_order(page);
		VM_BUG_ON(!PageTransHuge(page));
		/*
		 * Never OOM-kill a process for a huge page.  The
		 * fault handler will fall back to regular pages.
		 */
		oom = false;
	}

	ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
	if (ret == -ENOMEM)
		return ret;
	__mem_cgroup_commit_charge(memcg, page, nr_pages, ctype, false);
	return 0;
}

int mem_cgroup_newpage_charge(struct page *page,
			      struct mm_struct *mm, gfp_t gfp_mask)
{
	if (mem_cgroup_disabled())
		return 0;
	VM_BUG_ON(page_mapped(page));
	VM_BUG_ON(page->mapping && !PageAnon(page));
	VM_BUG_ON(!mm);
	return mem_cgroup_charge_common(page, mm, gfp_mask,
					MEM_CGROUP_CHARGE_TYPE_ANON);
}

/*
 * During swap-in, try_charge -> commit or cancel, the page is locked.
 * And when try_charge() successfully returns, one refcnt to memcg without
 * struct page_cgroup is acquired. This refcnt will be consumed by
 * "commit()" or removed by "cancel()"
 */
static int __mem_cgroup_try_charge_swapin(struct mm_struct *mm,
					  struct page *page,
					  gfp_t mask,
					  struct mem_cgroup **memcgp)
{
	struct mem_cgroup *memcg;
	struct page_cgroup *pc;
	int ret;

	pc = lookup_page_cgroup(page);
	/*
	 * Every swap fault against a single page tries to charge the
	 * page, bail as early as possible.  shmem_unuse() encounters
	 * already charged pages, too.  The USED bit is protected by
	 * the page lock, which serializes swap cache removal, which
	 * in turn serializes uncharging.
	 */
	if (PageCgroupUsed(pc))
		return 0;
	if (!do_swap_account)
		goto charge_cur_mm;
	memcg = try_get_mem_cgroup_from_page(page);
	if (!memcg)
		goto charge_cur_mm;
	*memcgp = memcg;
	ret = __mem_cgroup_try_charge(NULL, mask, 1, memcgp, true);
	css_put(&memcg->css);
	if (ret == -EINTR)
		ret = 0;
	return ret;
charge_cur_mm:
	ret = __mem_cgroup_try_charge(mm, mask, 1, memcgp, true);
	if (ret == -EINTR)
		ret = 0;
	return ret;
}

int mem_cgroup_try_charge_swapin(struct mm_struct *mm, struct page *page,
				 gfp_t gfp_mask, struct mem_cgroup **memcgp)
{
	*memcgp = NULL;
	if (mem_cgroup_disabled())
		return 0;
	/*
	 * A racing thread's fault, or swapoff, may have already
	 * updated the pte, and even removed page from swap cache: in
	 * those cases unuse_pte()'s pte_same() test will fail; but
	 * there's also a KSM case which does need to charge the page.
	 */
	if (!PageSwapCache(page)) {
		int ret;

		ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, memcgp, true);
		if (ret == -EINTR)
			ret = 0;
		return ret;
	}
	return __mem_cgroup_try_charge_swapin(mm, page, gfp_mask, memcgp);
}

void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return;
	if (!memcg)
		return;
	__mem_cgroup_cancel_charge(memcg, 1);
}

static void
__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
					enum charge_type ctype)
{
	if (mem_cgroup_disabled())
		return;
	if (!memcg)
		return;

	__mem_cgroup_commit_charge(memcg, page, 1, ctype, true);
	/*
	 * Now swap is on-memory. This means this page may be
	 * counted both as mem and swap, i.e. double-counted.
	 * Fix it by uncharging from memsw. Basically, this SwapCache is stable
	 * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page()
	 * may call delete_from_swap_cache() before we reach here.
	 */
	if (do_swap_account && PageSwapCache(page)) {
		swp_entry_t ent = {.val = page_private(page)};
		mem_cgroup_uncharge_swap(ent);
	}
}

void mem_cgroup_commit_charge_swapin(struct page *page,
				     struct mem_cgroup *memcg)
{
	__mem_cgroup_commit_charge_swapin(page, memcg,
					  MEM_CGROUP_CHARGE_TYPE_ANON);
}
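
/*
 * Illustrative sketch (simplified, error paths omitted): the swap-in charge
 * protocol as a fault handler such as do_swap_page() uses it:
 *
 *	struct mem_cgroup *memcg;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &memcg))
 *		goto out_fail;
 *	...install the pte under lock...
 *	mem_cgroup_commit_charge_swapin(page, memcg);
 *
 * or, if the pte changed underneath us, the cancel path instead:
 *
 *	mem_cgroup_cancel_charge_swapin(memcg);
 */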

int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask)
{
	struct mem_cgroup *memcg = NULL;
	enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
	int ret;

	if (mem_cgroup_disabled())
		return 0;
	if (PageCompound(page))
		return 0;

	if (!PageSwapCache(page))
		ret = mem_cgroup_charge_common(page, mm, gfp_mask, type);
	else { /* page is swapcache/shmem */
		ret = __mem_cgroup_try_charge_swapin(mm, page,
						     gfp_mask, &memcg);
		if (!ret)
			__mem_cgroup_commit_charge_swapin(page, memcg, type);
	}
	return ret;
}

static void mem_cgroup_do_uncharge(struct mem_cgroup *memcg,
				   unsigned int nr_pages,
				   const enum charge_type ctype)
{
	struct memcg_batch_info *batch = NULL;
	bool uncharge_memsw = true;

	/* If swapout, usage of swap doesn't decrease */
	if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
		uncharge_memsw = false;

	batch = &current->memcg_batch;
	/*
	 * Usually, we do css_get() when we remember the memcg pointer.
	 * But in this case, we keep res->usage until the end of a series of
	 * uncharges. Then, it's ok to ignore memcg's refcnt.
	 */
	if (!batch->memcg)
		batch->memcg = memcg;
	/*
	 * do_batch > 0 when unmapping pages or inode invalidate/truncate.
	 * In those cases, all pages freed continuously can be expected to be in
	 * the same cgroup and we have chance to coalesce uncharges.
	 * But we do uncharge one by one if this is killed by OOM(TIF_MEMDIE)
	 * because we want to do uncharge as soon as possible.
	 */

	if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
		goto direct_uncharge;

	if (nr_pages > 1)
		goto direct_uncharge;

	/*
	 * In the typical case, batch->memcg == mem. This means we can
	 * merge a series of uncharges to an uncharge of res_counter.
	 * If not, we uncharge res_counter one by one.
	 */
	if (batch->memcg != memcg)
		goto direct_uncharge;
	/* remember freed charge and uncharge it later */
	batch->nr_pages++;
	if (uncharge_memsw)
		batch->memsw_nr_pages++;
	return;
direct_uncharge:
	res_counter_uncharge(&memcg->res, nr_pages * PAGE_SIZE);
	if (uncharge_memsw)
		res_counter_uncharge(&memcg->memsw, nr_pages * PAGE_SIZE);
	if (unlikely(batch->memcg != memcg))
		memcg_oom_recover(memcg);
}

/*
 * uncharge if !page_mapped(page)
 */
static struct mem_cgroup *
__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype,
			     bool end_migration)
{
	struct mem_cgroup *memcg = NULL;
	unsigned int nr_pages = 1;
	struct page_cgroup *pc;
	bool anon;

	if (mem_cgroup_disabled())
		return NULL;

	if (PageTransHuge(page)) {
		nr_pages <<= compound_order(page);
		VM_BUG_ON(!PageTransHuge(page));
	}
	/*
	 * Check if our page_cgroup is valid
	 */
	pc = lookup_page_cgroup(page);
	if (unlikely(!PageCgroupUsed(pc)))
		return NULL;

	lock_page_cgroup(pc);

	memcg = pc->mem_cgroup;

	if (!PageCgroupUsed(pc))
		goto unlock_out;

	anon = PageAnon(page);

	switch (ctype) {
	case MEM_CGROUP_CHARGE_TYPE_ANON:
		/*
		 * Generally PageAnon tells if it's the anon statistics to be
		 * updated; but sometimes e.g. mem_cgroup_uncharge_page() is
		 * used before page reached the stage of being marked PageAnon.
		 */
		anon = true;
		/* fallthrough */
	case MEM_CGROUP_CHARGE_TYPE_DROP:
		/* See mem_cgroup_prepare_migration() */
		if (page_mapped(page))
			goto unlock_out;
		/*
		 * Pages under migration may not be uncharged.  But
		 * end_migration() /must/ be the one uncharging the
		 * unused post-migration page and so it has to call
		 * here with the migration bit still set.  See the
		 * res_counter handling below.
		 */
		if (!end_migration && PageCgroupMigration(pc))
			goto unlock_out;
		break;
	case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
		if (!PageAnon(page)) {	/* Shared memory */
			if (page->mapping && !page_is_file_cache(page))
				goto unlock_out;
		} else if (page_mapped(page)) /* Anon */
				goto unlock_out;
		break;
	default:
		break;
	}

	mem_cgroup_charge_statistics(memcg, page, anon, -nr_pages);

	ClearPageCgroupUsed(pc);
	/*
	 * pc->mem_cgroup is not cleared here. It will be accessed when it's
	 * freed from LRU. This is safe because uncharged page is expected not
	 * to be reused (freed soon). Exception is SwapCache, it's handled by
	 * special functions.
	 */

	unlock_page_cgroup(pc);
	/*
	 * even after unlock, we have memcg->res.usage here and this memcg
	 * will never be freed, so it's safe to call css_get().
	 */
	memcg_check_events(memcg, page);
	if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
		mem_cgroup_swap_statistics(memcg, true);
		css_get(&memcg->css);
	}
	/*
	 * Migration does not charge the res_counter for the
	 * replacement page, so leave it alone when phasing out the
	 * page that is unused after the migration.
	 */
	if (!end_migration && !mem_cgroup_is_root(memcg))
		mem_cgroup_do_uncharge(memcg, nr_pages, ctype);

	return memcg;

unlock_out:
	unlock_page_cgroup(pc);
	return NULL;
}

void mem_cgroup_uncharge_page(struct page *page)
{
	/* early check. */
	if (page_mapped(page))
		return;
	VM_BUG_ON(page->mapping && !PageAnon(page));
	/*
	 * If the page is in swap cache, uncharge should be deferred
	 * to the swap path, which also properly accounts swap usage
	 * and handles memcg lifetime.
	 *
	 * Note that this check is not stable and reclaim may add the
	 * page to swap cache at any time after this.  However, if the
	 * page is not in swap cache by the time page->mapcount hits
	 * 0, there won't be any page table references to the swap
	 * slot, and reclaim will free it and not actually write the
	 * page to disk.
	 */
	if (PageSwapCache(page))
		return;
	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_ANON, false);
}

void mem_cgroup_uncharge_cache_page(struct page *page)
{
	VM_BUG_ON(page_mapped(page));
	VM_BUG_ON(page->mapping);
	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE, false);
}

/*
 * Batch_start/batch_end is called in unmap_page_range/invalidate/truncate.
 * In those cases, pages are freed continuously and we can expect pages
 * are in the same memcg. Each of these calls itself limits the number of
 * pages freed at once, then uncharge_start/end() is called properly.
 * This may be called multiple (nested) times in a context.
 */

void mem_cgroup_uncharge_start(void)
{
	current->memcg_batch.do_batch++;
	/* We can do nest. */
	if (current->memcg_batch.do_batch == 1) {
		current->memcg_batch.memcg = NULL;
		current->memcg_batch.nr_pages = 0;
		current->memcg_batch.memsw_nr_pages = 0;
	}
}

void mem_cgroup_uncharge_end(void)
{
	struct memcg_batch_info *batch = &current->memcg_batch;

	if (!batch->do_batch)
		return;

	batch->do_batch--;
	if (batch->do_batch) /* If stacked, do nothing. */
		return;

	if (!batch->memcg)
		return;
	/*
	 * This "batch->memcg" is valid without any css_get/put etc...
	 * because we hide charges behind us.
	 */
	if (batch->nr_pages)
		res_counter_uncharge(&batch->memcg->res,
				     batch->nr_pages * PAGE_SIZE);
	if (batch->memsw_nr_pages)
		res_counter_uncharge(&batch->memcg->memsw,
				     batch->memsw_nr_pages * PAGE_SIZE);
	memcg_oom_recover(batch->memcg);
	/* forget this pointer (for sanity check) */
	batch->memcg = NULL;
}
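
/*
 * Illustrative sketch: a bulk-free path bracketing its uncharges so the
 * res_counter is touched once per batch rather than once per page, similar
 * to what the truncate/invalidate paths do:
 *
 *	mem_cgroup_uncharge_start();
 *	for each page in the range {
 *		...remove page from the mapping...
 *		mem_cgroup_uncharge_cache_page(page);
 *	}
 *	mem_cgroup_uncharge_end();
 */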

#ifdef CONFIG_SWAP
/*
 * called after __delete_from_swap_cache() and drop "page" account.
 * memcg information is recorded to swap_cgroup of "ent"
 */
void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
{
	struct mem_cgroup *memcg;
	int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;

	if (!swapout) /* this was a swap cache but the swap is unused ! */
		ctype = MEM_CGROUP_CHARGE_TYPE_DROP;

	memcg = __mem_cgroup_uncharge_common(page, ctype, false);

	/*
	 * record memcg information,  if swapout && memcg != NULL,
	 * css_get() was called in uncharge().
	 */
	if (do_swap_account && swapout && memcg)
		swap_cgroup_record(ent, css_id(&memcg->css));
}
#endif

#ifdef CONFIG_MEMCG_SWAP
/*
 * called from swap_entry_free(). remove record in swap_cgroup and
 * uncharge "memsw" account.
 */
void mem_cgroup_uncharge_swap(swp_entry_t ent)
{
	struct mem_cgroup *memcg;
	unsigned short id;

	if (!do_swap_account)
		return;

	id = swap_cgroup_record(ent, 0);
	rcu_read_lock();
	memcg = mem_cgroup_lookup(id);
	if (memcg) {
		/*
		 * We uncharge this because swap is freed.  This memcg can
		 * be an obsolete one. We avoid calling css_tryget().
		 */
		if (!mem_cgroup_is_root(memcg))
			res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
		mem_cgroup_swap_statistics(memcg, false);
		css_put(&memcg->css);
	}
	rcu_read_unlock();
}

/**
 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
 * @entry: swap entry to be moved
 * @from:  mem_cgroup which the entry is moved from
 * @to:  mem_cgroup which the entry is moved to
 *
 * It succeeds only when the swap_cgroup's record for this entry is the same
 * as the mem_cgroup's id of @from.
 *
 * Returns 0 on success, -EINVAL on failure.
 *
 * The caller must have charged to @to, IOW, called res_counter_charge() about
 * both res and memsw, and called css_get().
 */
static int mem_cgroup_move_swap_account(swp_entry_t entry,
				struct mem_cgroup *from, struct mem_cgroup *to)
{
	unsigned short old_id, new_id;

	old_id = css_id(&from->css);
	new_id = css_id(&to->css);

	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
		mem_cgroup_swap_statistics(from, false);
		mem_cgroup_swap_statistics(to, true);
		/*
		 * This function is only called from task migration context now.
		 * It postpones res_counter and refcount handling till the end
		 * of task migration(mem_cgroup_clear_mc()) for performance
		 * improvement. But we cannot postpone css_get(to) because if
		 * the process that has been moved to @to does swap-in, the
		 * refcount of @to might be decreased to 0.
		 *
		 * We are in attach() phase, so the cgroup is guaranteed to be
		 * alive, so we can just call css_get().
		 */
		css_get(&to->css);
		return 0;
	}
	return -EINVAL;
}
#else
static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
				struct mem_cgroup *from, struct mem_cgroup *to)
{
	return -EINVAL;
}
#endif

/*
 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
 * page belongs to.
 */
void mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
				  struct mem_cgroup **memcgp)
{
	struct mem_cgroup *memcg = NULL;
	unsigned int nr_pages = 1;
	struct page_cgroup *pc;
	enum charge_type ctype;

	*memcgp = NULL;

	if (mem_cgroup_disabled())
		return;

	if (PageTransHuge(page))
		nr_pages <<= compound_order(page);

	pc = lookup_page_cgroup(page);
	lock_page_cgroup(pc);
	if (PageCgroupUsed(pc)) {
		memcg = pc->mem_cgroup;
		css_get(&memcg->css);
		/*
		 * At migrating an anonymous page, its mapcount goes down
		 * to 0 and uncharge() will be called. But, even if it's fully
		 * unmapped, migration may fail and this page has to be
		 * charged again. We set MIGRATION flag here and delay uncharge
		 * until end_migration() is called
		 *
		 * Corner Case Thinking
		 * A)
		 * When the old page was mapped as Anon and it's unmap-and-freed
		 * while migration was ongoing.
		 * If unmap finds the old page, uncharge() of it will be delayed
		 * until end_migration(). If unmap finds a new page, it's
		 * uncharged when it makes mapcount go 1->0. If unmap code
		 * finds swap_migration_entry, the new page will not be mapped
		 * and end_migration() will find it (mapcount==0).
		 *
		 * B)
		 * When the old page was mapped but migration fails, the kernel
		 * remaps it. A charge for it is kept by MIGRATION flag even
		 * if mapcount goes down to 0. We can do remap successfully
		 * without charging it again.
		 *
		 * C)
		 * The "old" page is under lock_page() until the end of
		 * migration, so, the old page itself will not be swapped-out.
		 * If the new page is swapped out before end_migration, our
		 * hook to usual swap-out path will catch the event.
		 */
		if (PageAnon(page))
			SetPageCgroupMigration(pc);
	}
	unlock_page_cgroup(pc);
	/*
	 * If the page is not charged at this point,
	 * we return here.
	 */
	if (!memcg)
		return;

	*memcgp = memcg;
	/*
	 * We charge new page before it's used/mapped. So, even if unlock_page()
	 * is called before end_migration, we can catch all events on this new
	 * page. In the case new page is migrated but not remapped, new page's
	 * mapcount will be finally 0 and we call uncharge in end_migration().
	 */
	if (PageAnon(page))
		ctype = MEM_CGROUP_CHARGE_TYPE_ANON;
	else
		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
	/*
	 * The page is committed to the memcg, but it's not actually
	 * charged to the res_counter since we plan on replacing the
	 * old one and only one page is going to be left afterwards.
	 */
	__mem_cgroup_commit_charge(memcg, newpage, nr_pages, ctype, false);
}

/* remove redundant charge if migration failed */
void mem_cgroup_end_migration(struct mem_cgroup *memcg,
	struct page *oldpage, struct page *newpage, bool migration_ok)
{
	struct page *used, *unused;
	struct page_cgroup *pc;
	bool anon;

	if (!memcg)
		return;

	if (!migration_ok) {
		used = oldpage;
		unused = newpage;
	} else {
		used = newpage;
		unused = oldpage;
	}
	anon = PageAnon(used);
	__mem_cgroup_uncharge_common(unused,
				     anon ? MEM_CGROUP_CHARGE_TYPE_ANON
				     : MEM_CGROUP_CHARGE_TYPE_CACHE,
				     true);
	css_put(&memcg->css);
	/*
	 * We disallowed uncharge of pages under migration because mapcount
	 * of the page goes down to zero, temporarily.
	 * Clear the flag and check the page should be charged.
	 */
	pc = lookup_page_cgroup(oldpage);
	lock_page_cgroup(pc);
	ClearPageCgroupMigration(pc);
	unlock_page_cgroup(pc);

	/*
	 * If a page is a file cache, radix-tree replacement is very atomic
	 * and we can skip this check. When it was an Anon page, its mapcount
	 * goes down to 0. But because we added MIGRATION flag, it's not
	 * uncharged yet. There are several cases but page->mapcount check
	 * and USED bit check in mem_cgroup_uncharge_page() will do enough
	 * check. (see prepare_charge() also)
	 */
	if (anon)
		mem_cgroup_uncharge_page(used);
}

/*
 * At replace page cache, newpage is not under any memcg but it's on
 * LRU. So, this function doesn't touch res_counter but handles LRU
 * in correct way. Both pages are locked so we cannot race with uncharge.
 */
void mem_cgroup_replace_page_cache(struct page *oldpage,
				  struct page *newpage)
{
	struct mem_cgroup *memcg = NULL;
	struct page_cgroup *pc;
	enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;

	if (mem_cgroup_disabled())
		return;

	pc = lookup_page_cgroup(oldpage);
	/* fix accounting on old pages */
	lock_page_cgroup(pc);
	if (PageCgroupUsed(pc)) {
		memcg = pc->mem_cgroup;
		mem_cgroup_charge_statistics(memcg, oldpage, false, -1);
		ClearPageCgroupUsed(pc);
	}
	unlock_page_cgroup(pc);

	/*
	 * When called from shmem_replace_page(), in some cases the
	 * oldpage has already been charged, and in some cases not.
	 */
	if (!memcg)
		return;
	/*
	 * Even if newpage->mapping was NULL before starting replacement,
	 * the newpage may be on LRU(or pagevec for LRU) already. We lock
	 * LRU while we overwrite pc->mem_cgroup.
	 */
	__mem_cgroup_commit_charge(memcg, newpage, 1, type, true);
}

#ifdef CONFIG_DEBUG_VM
static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
{
	struct page_cgroup *pc;

	pc = lookup_page_cgroup(page);
	/*
	 * Can be NULL while feeding pages into the page allocator for
	 * the first time, i.e. during boot or memory hotplug;
	 * or when mem_cgroup_disabled().
	 */
	if (likely(pc) && PageCgroupUsed(pc))
		return pc;
	return NULL;
}

bool mem_cgroup_bad_page_check(struct page *page)
{
	if (mem_cgroup_disabled())
		return false;

	return lookup_page_cgroup_used(page) != NULL;
}

void mem_cgroup_print_bad_page(struct page *page)
{
	struct page_cgroup *pc;

	pc = lookup_page_cgroup_used(page);
	if (pc) {
		pr_alert("pc:%p pc->flags:%lx pc->mem_cgroup:%p\n",
			 pc, pc->flags, pc->mem_cgroup);
	}
}
#endif

static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
				unsigned long long val)
{
	int retry_count;
	u64 memswlimit, memlimit;
	int ret = 0;
	int children = mem_cgroup_count_children(memcg);
	u64 curusage, oldusage;
	int enlarge;

	/*
	 * For keeping hierarchical_reclaim simple, how long we should retry
	 * depends on the caller. We set our retry-count to be a function
	 * of the number of children we should visit in this loop.
	 */
	retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;

	oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);

	enlarge = 0;
	while (retry_count) {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		/*
		 * Rather than hide all in some function, I do this in
		 * open coded manner. You see what this really does.
		 * We have to guarantee memcg->res.limit <= memcg->memsw.limit.
		 */
		mutex_lock(&set_limit_mutex);
		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
		if (memswlimit < val) {
			ret = -EINVAL;
			mutex_unlock(&set_limit_mutex);
			break;
		}

		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
		if (memlimit < val)
			enlarge = 1;

		ret = res_counter_set_limit(&memcg->res, val);
		if (!ret) {
			if (memswlimit == val)
				memcg->memsw_is_minimum = true;
			else
				memcg->memsw_is_minimum = false;
		}
		mutex_unlock(&set_limit_mutex);

		if (!ret)
			break;

		mem_cgroup_reclaim(memcg, GFP_KERNEL,
				   MEM_CGROUP_RECLAIM_SHRINK);
		curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
		/* Usage is reduced ? */
		if (curusage >= oldusage)
			retry_count--;
		else
			oldusage = curusage;
	}
	if (!ret && enlarge)
		memcg_oom_recover(memcg);

	return ret;
}
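
/*
 * Illustrative sketch (hypothetical cgroup path): the resize above is driven
 * by userspace writing the limit file, e.g.
 *
 *	# echo 512M > /sys/fs/cgroup/memory/mygroup/memory.limit_in_bytes
 *
 * The write reaches mem_cgroup_write() below, which parses the value and
 * dispatches to mem_cgroup_resize_limit() for the _MEM resource.
 */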

static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
					unsigned long long val)
{
	int retry_count;
	u64 memlimit, memswlimit, oldusage, curusage;
	int children = mem_cgroup_count_children(memcg);
	int ret = -EBUSY;
	int enlarge = 0;

	/* see mem_cgroup_resize_res_limit */
	retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
	oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
	while (retry_count) {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		/*
		 * Rather than hide all in some function, I do this in
		 * open coded manner. You see what this really does.
		 * We have to guarantee memcg->res.limit <= memcg->memsw.limit.
		 */
		mutex_lock(&set_limit_mutex);
		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
		if (memlimit > val) {
			ret = -EINVAL;
			mutex_unlock(&set_limit_mutex);
			break;
		}
		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
		if (memswlimit < val)
			enlarge = 1;
		ret = res_counter_set_limit(&memcg->memsw, val);
		if (!ret) {
			if (memlimit == val)
				memcg->memsw_is_minimum = true;
			else
				memcg->memsw_is_minimum = false;
		}
		mutex_unlock(&set_limit_mutex);

		if (!ret)
			break;

		mem_cgroup_reclaim(memcg, GFP_KERNEL,
				   MEM_CGROUP_RECLAIM_NOSWAP |
				   MEM_CGROUP_RECLAIM_SHRINK);
		curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
		/* Usage is reduced ? */
		if (curusage >= oldusage)
			retry_count--;
		else
			oldusage = curusage;
	}
	if (!ret && enlarge)
		memcg_oom_recover(memcg);
	return ret;
}

/**
 * mem_cgroup_force_empty_list - clears LRU of a group
 * @memcg: group to clear
 * @node: NUMA node
 * @zid: zone id
 * @lru: lru to clear
 *
 * Traverse a specified page_cgroup list and try to drop them all.  This doesn't
 * reclaim the pages themselves - pages are moved to the parent (or root)
 * group.
 */
static void mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
				int node, int zid, enum lru_list lru)
{
	struct lruvec *lruvec;
	unsigned long flags;
	struct list_head *list;
	struct page *busy;
	struct zone *zone;

	zone = &NODE_DATA(node)->node_zones[zid];
	lruvec = mem_cgroup_zone_lruvec(zone, memcg);
	list = &lruvec->lists[lru];

	busy = NULL;
	do {
		struct page_cgroup *pc;
		struct page *page;

		spin_lock_irqsave(&zone->lru_lock, flags);
		if (list_empty(list)) {
			spin_unlock_irqrestore(&zone->lru_lock, flags);
			break;
		}
		page = list_entry(list->prev, struct page, lru);
		if (busy == page) {
			list_move(&page->lru, list);
			busy = NULL;
			spin_unlock_irqrestore(&zone->lru_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&zone->lru_lock, flags);

		pc = lookup_page_cgroup(page);

		if (mem_cgroup_move_parent(page, pc, memcg)) {
			/* found lock contention or "pc" is obsolete. */
			busy = page;
			cond_resched();
		} else
			busy = NULL;
	} while (!list_empty(list));
}

/*
 * Make the mem_cgroup's charge 0 if there is no task, by moving
 * all the charges and pages to the parent.
 * This enables deleting this mem_cgroup.
 *
 * Caller is responsible for holding css reference on the memcg.
 */
static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg)
{
	int node, zid;
	u64 usage;

	do {
		/* This is for making all *used* pages to be on LRU. */
		lru_add_drain_all();
		drain_all_stock_sync(memcg);
		mem_cgroup_start_move(memcg);
		for_each_node_state(node, N_MEMORY) {
			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
				enum lru_list lru;
				for_each_lru(lru) {
					mem_cgroup_force_empty_list(memcg,
							node, zid, lru);
				}
			}
		}
		mem_cgroup_end_move(memcg);
		memcg_oom_recover(memcg);
		cond_resched();

		/*
		 * Kernel memory may not necessarily be trackable to a specific
		 * process. So they are not migrated, and therefore we can't
		 * expect their value to drop to 0 here.
		 * Having res filled up with kmem only is enough.
		 *
		 * This is a safety check because mem_cgroup_force_empty_list
		 * could have raced with mem_cgroup_replace_page_cache callers
		 * so the lru seemed empty but the page could have been added
		 * right after the check. RES_USAGE should be safe as we always
		 * charge before adding to the LRU.
		 */
		usage = res_counter_read_u64(&memcg->res, RES_USAGE) -
			res_counter_read_u64(&memcg->kmem, RES_USAGE);
	} while (usage > 0);
}

/*
 * This mainly exists for tests during the setting of use_hierarchy.
 * Since this is the very setting we are changing, the current hierarchy value
 * is meaningless.
 */
static inline bool __memcg_has_children(struct mem_cgroup *memcg)
{
	struct cgroup_subsys_state *pos;

	/* bounce at first found */
	css_for_each_child(pos, &memcg->css)
		return true;
	return false;
}

/*
 * Must be called with memcg_create_mutex held, unless the cgroup is guaranteed
 * to be already dead (as in mem_cgroup_force_empty, for instance).  This is
 * different from mem_cgroup_count_children(), in the sense that we don't
 * really care how many children we have; we only need to know if we have any.
 * It also counts any memcg without hierarchy as infertile.
 */
static inline bool memcg_has_children(struct mem_cgroup *memcg)
{
	return memcg->use_hierarchy && __memcg_has_children(memcg);
}

/*
 * Reclaims as many pages from the given memcg as possible and moves
 * the rest to the parent.
 *
 * Caller is responsible for holding css reference for memcg.
 */
static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
{
	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct cgroup *cgrp = memcg->css.cgroup;

	/* returns EBUSY if there is a task or if we come here twice. */
	if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
		return -EBUSY;

	/* we call try-to-free pages to make this cgroup empty */
	lru_add_drain_all();
	/* try to free all pages in this cgroup */
	while (nr_retries && res_counter_read_u64(&memcg->res, RES_USAGE) > 0) {
		int progress;

		if (signal_pending(current))
			return -EINTR;

		progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL,
						false);
		if (!progress) {
			nr_retries--;
			/* maybe some writeback is necessary */
			congestion_wait(BLK_RW_ASYNC, HZ/10);
		}

	}
	lru_add_drain();
	mem_cgroup_reparent_charges(memcg);

	return 0;
}
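
/*
 * Illustrative sketch (hypothetical cgroup path): userspace triggers the
 * reclaim above by writing the control file, e.g.
 *
 *	# echo 0 > /sys/fs/cgroup/memory/mygroup/memory.force_empty
 *
 * which lands in mem_cgroup_force_empty_write() below.
 */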

static int mem_cgroup_force_empty_write(struct cgroup_subsys_state *css,
					unsigned int event)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	if (mem_cgroup_is_root(memcg))
		return -EINVAL;
	return mem_cgroup_force_empty(memcg);
}

static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
				     struct cftype *cft)
{
	return mem_cgroup_from_css(css)->use_hierarchy;
}

static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
				      struct cftype *cft, u64 val)
{
	int retval = 0;
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct mem_cgroup *parent_memcg = mem_cgroup_from_css(css_parent(&memcg->css));

	mutex_lock(&memcg_create_mutex);

	if (memcg->use_hierarchy == val)
		goto out;

	/*
	 * If parent's use_hierarchy is set, we can't make any modifications
	 * in the child subtrees. If it is unset, then the change can
	 * occur, provided the current cgroup has no children.
	 *
	 * For the root cgroup, parent_mem is NULL, we allow value to be
	 * set if there are no children.
	 */
	if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
				(val == 1 || val == 0)) {
		if (!__memcg_has_children(memcg))
			memcg->use_hierarchy = val;
		else
			retval = -EBUSY;
	} else
		retval = -EINVAL;

out:
	mutex_unlock(&memcg_create_mutex);

	return retval;
}
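
/*
 * Illustrative sketch (hypothetical layout): with use_hierarchy == 1 on
 * /sys/fs/cgroup/memory/parent, pages charged in parent/child are also
 * accounted to parent, so parent's memory.usage_in_bytes covers the whole
 * subtree and parent's limit constrains the children. With
 * use_hierarchy == 0, each group's usage and limit stand alone.
 */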

static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg,
					       enum mem_cgroup_stat_index idx)
{
	struct mem_cgroup *iter;
	long val = 0;

	/* Per-cpu values can be negative, use a signed accumulator */
	for_each_mem_cgroup_tree(iter, memcg)
		val += mem_cgroup_read_stat(iter, idx);

	if (val < 0) /* race ? */
		val = 0;
	return val;
}

static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
{
	u64 val;

	if (!mem_cgroup_is_root(memcg)) {
		if (!swap)
			return res_counter_read_u64(&memcg->res, RES_USAGE);
		else
			return res_counter_read_u64(&memcg->memsw, RES_USAGE);
	}

	/*
	 * Transparent hugepages are still accounted for in MEM_CGROUP_STAT_RSS
	 * as well as in MEM_CGROUP_STAT_RSS_HUGE.
	 */
	val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE);
	val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS);

	if (swap)
		val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAP);

	return val << PAGE_SHIFT;
}

static ssize_t mem_cgroup_read(struct cgroup_subsys_state *css,
			       struct cftype *cft, struct file *file,
			       char __user *buf, size_t nbytes, loff_t *ppos)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	char str[64];
	u64 val;
	int name, len;
	enum res_type type;

	type = MEMFILE_TYPE(cft->private);
	name = MEMFILE_ATTR(cft->private);

	switch (type) {
	case _MEM:
		if (name == RES_USAGE)
			val = mem_cgroup_usage(memcg, false);
		else
			val = res_counter_read_u64(&memcg->res, name);
		break;
	case _MEMSWAP:
		if (name == RES_USAGE)
			val = mem_cgroup_usage(memcg, true);
		else
			val = res_counter_read_u64(&memcg->memsw, name);
		break;
	case _KMEM:
		val = res_counter_read_u64(&memcg->kmem, name);
		break;
	default:
		BUG();
	}

	len = scnprintf(str, sizeof(str), "%llu\n", (unsigned long long)val);
	return simple_read_from_buffer(buf, nbytes, ppos, str, len);
}

static int memcg_update_kmem_limit(struct cgroup_subsys_state *css, u64 val)
{
	int ret = -EINVAL;
#ifdef CONFIG_MEMCG_KMEM
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	/*
	 * For simplicity, we won't allow this to be disabled.  It also can't
	 * be changed if the cgroup has children already, or if tasks had
	 * already joined.
	 *
	 * If tasks join before we set the limit, a person looking at
	 * kmem.usage_in_bytes will have no way to determine when it took
	 * place, which makes the value quite meaningless.
	 *
	 * After it first became limited, changes in the value of the limit are
	 * of course permitted.
	 */
	mutex_lock(&memcg_create_mutex);
	mutex_lock(&set_limit_mutex);
	if (!memcg->kmem_account_flags && val != RESOURCE_MAX) {
		if (cgroup_task_count(css->cgroup) || memcg_has_children(memcg)) {
			ret = -EBUSY;
			goto out;
		}
		ret = res_counter_set_limit(&memcg->kmem, val);
		VM_BUG_ON(ret);

		ret = memcg_update_cache_sizes(memcg);
		if (ret) {
			res_counter_set_limit(&memcg->kmem, RESOURCE_MAX);
			goto out;
		}
		static_key_slow_inc(&memcg_kmem_enabled_key);
		/*
		 * setting the active bit after the inc will guarantee no one
		 * starts accounting before all call sites are patched
		 */
		memcg_kmem_set_active(memcg);
	} else
		ret = res_counter_set_limit(&memcg->kmem, val);
out:
	mutex_unlock(&set_limit_mutex);
	mutex_unlock(&memcg_create_mutex);
#endif
	return ret;
}


#ifdef CONFIG_MEMCG_KMEM
static int memcg_propagate_kmem(struct mem_cgroup *memcg)
{
	int ret = 0;
	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
	if (!parent)
		goto out;

	memcg->kmem_account_flags = parent->kmem_account_flags;
	/*
	 * When that happens, we need to disable the static branch only on
	 * those memcgs that enabled it. To achieve this, we would be forced to
	 * complicate the code by keeping track of which memcgs were the ones
	 * that actually enabled limits, and which ones got it from its
	 * parents.
	 *
	 * It is a lot simpler just to do static_key_slow_inc() on every child
	 * that is accounted.
	 */
	if (!memcg_kmem_is_active(memcg))
		goto out;

	/*
	 * __mem_cgroup_free() will issue static_key_slow_dec() because this
	 * memcg is active already. If the later initialization fails then the
	 * cgroup core triggers the cleanup so we do not have to do it here.
	 */
	static_key_slow_inc(&memcg_kmem_enabled_key);

	mutex_lock(&set_limit_mutex);
	memcg_stop_kmem_account();
	ret = memcg_update_cache_sizes(memcg);
	memcg_resume_kmem_account();
	mutex_unlock(&set_limit_mutex);
out:
	return ret;
}
#endif /* CONFIG_MEMCG_KMEM */

/*
 * The user of this function is...
 * RES_LIMIT.
 */
static int mem_cgroup_write(struct cgroup_subsys_state *css, struct cftype *cft,
			    const char *buffer)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	enum res_type type;
	int name;
	unsigned long long val;
	int ret;

	type = MEMFILE_TYPE(cft->private);
	name = MEMFILE_ATTR(cft->private);

	switch (name) {
	case RES_LIMIT:
		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
			ret = -EINVAL;
			break;
		}
		/* This function does all necessary parse...reuse it */
		ret = res_counter_memparse_write_strategy(buffer, &val);
		if (ret)
			break;
		if (type == _MEM)
			ret = mem_cgroup_resize_limit(memcg, val);
		else if (type == _MEMSWAP)
			ret = mem_cgroup_resize_memsw_limit(memcg, val);
		else if (type == _KMEM)
			ret = memcg_update_kmem_limit(css, val);
		else
			return -EINVAL;
		break;
	case RES_SOFT_LIMIT:
		ret = res_counter_memparse_write_strategy(buffer, &val);
		if (ret)
			break;
		/*
		 * For memsw, soft limits are hard to implement in terms
		 * of semantics, for now, we support soft limits for
		 * control without swap
		 */
		if (type == _MEM)
			ret = res_counter_set_soft_limit(&memcg->res, val);
		else
			ret = -EINVAL;
		break;
	default:
		ret = -EINVAL; /* should be BUG() ? */
		break;
	}
	return ret;
}

static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
		unsigned long long *mem_limit, unsigned long long *memsw_limit)
{
	unsigned long long min_limit, min_memsw_limit, tmp;

	min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
	min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
	if (!memcg->use_hierarchy)
		goto out;

	while (css_parent(&memcg->css)) {
		memcg = mem_cgroup_from_css(css_parent(&memcg->css));
		if (!memcg->use_hierarchy)
			break;
		tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
		min_limit = min(min_limit, tmp);
		tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
		min_memsw_limit = min(min_memsw_limit, tmp);
	}
out:
	*mem_limit = min_limit;
	*memsw_limit = min_memsw_limit;
}

static int mem_cgroup_reset(struct cgroup_subsys_state *css, unsigned int event)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	int name;
	enum res_type type;

	type = MEMFILE_TYPE(event);
	name = MEMFILE_ATTR(event);

	switch (name) {
	case RES_MAX_USAGE:
		if (type == _MEM)
			res_counter_reset_max(&memcg->res);
		else if (type == _MEMSWAP)
			res_counter_reset_max(&memcg->memsw);
		else if (type == _KMEM)
			res_counter_reset_max(&memcg->kmem);
		else
			return -EINVAL;
		break;
	case RES_FAILCNT:
		if (type == _MEM)
			res_counter_reset_failcnt(&memcg->res);
		else if (type == _MEMSWAP)
			res_counter_reset_failcnt(&memcg->memsw);
		else if (type == _KMEM)
			res_counter_reset_failcnt(&memcg->kmem);
		else
			return -EINVAL;
		break;
	}

	return 0;
}
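
/*
 * Illustrative sketch (hypothetical cgroup path): the reset handler above
 * backs writes such as
 *
 *	# echo 0 > /sys/fs/cgroup/memory/mygroup/memory.max_usage_in_bytes
 *
 * which clears the recorded high-water mark without changing usage itself.
 */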

static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
					struct cftype *cft)
{
	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
}

#ifdef CONFIG_MMU
static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
					struct cftype *cft, u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	if (val >= (1 << NR_MOVE_TYPE))
		return -EINVAL;

	/*
	 * No kind of locking is needed in here, because ->can_attach() will
	 * check this value once in the beginning of the process, and then carry
	 * on with stale data. This means that changes to this value will only
	 * affect task migrations starting after the change.
	 */
	memcg->move_charge_at_immigrate = val;
	return 0;
}
#else
static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
					struct cftype *cft, u64 val)
{
	return -ENOSYS;
}
#endif

#ifdef CONFIG_NUMA
static int memcg_numa_stat_show(struct cgroup_subsys_state *css,
				struct cftype *cft, struct seq_file *m)
{
	int nid;
	unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
	unsigned long node_nr;
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	total_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL);
	seq_printf(m, "total=%lu", total_nr);
	for_each_node_state(nid, N_MEMORY) {
		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL);
		seq_printf(m, " N%d=%lu", nid, node_nr);
	}
	seq_putc(m, '\n');

	file_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_FILE);
	seq_printf(m, "file=%lu", file_nr);
	for_each_node_state(nid, N_MEMORY) {
		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
				LRU_ALL_FILE);
		seq_printf(m, " N%d=%lu", nid, node_nr);
	}
	seq_putc(m, '\n');

	anon_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_ANON);
	seq_printf(m, "anon=%lu", anon_nr);
	for_each_node_state(nid, N_MEMORY) {
		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
				LRU_ALL_ANON);
		seq_printf(m, " N%d=%lu", nid, node_nr);
	}
	seq_putc(m, '\n');

	unevictable_nr = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_UNEVICTABLE));
	seq_printf(m, "unevictable=%lu", unevictable_nr);
	for_each_node_state(nid, N_MEMORY) {
		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
				BIT(LRU_UNEVICTABLE));
		seq_printf(m, " N%d=%lu", nid, node_nr);
	}
	seq_putc(m, '\n');
	return 0;
}
#endif /* CONFIG_NUMA */

static inline void mem_cgroup_lru_names_not_uptodate(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
}

5051
static int memcg_stat_show(struct cgroup_subsys_state *css, struct cftype *cft,
5052
				 struct seq_file *m)
5053
{
5054
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5055 5056
	struct mem_cgroup *mi;
	unsigned int i;
5057

5058
	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
5059
		if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
5060
			continue;
5061 5062
		seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i],
			   mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
5063
	}
L
Lee Schermerhorn 已提交
5064

5065 5066 5067 5068 5069 5070 5071 5072
	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
		seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
			   mem_cgroup_read_events(memcg, i));

	for (i = 0; i < NR_LRU_LISTS; i++)
		seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
			   mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);

K
KAMEZAWA Hiroyuki 已提交
5073
	/* Hierarchical information */
5074 5075
	{
		unsigned long long limit, memsw_limit;
5076
		memcg_get_hierarchical_limit(memcg, &limit, &memsw_limit);
5077
		seq_printf(m, "hierarchical_memory_limit %llu\n", limit);
5078
		if (do_swap_account)
5079 5080
			seq_printf(m, "hierarchical_memsw_limit %llu\n",
				   memsw_limit);
5081
	}
K
KOSAKI Motohiro 已提交
5082

5083 5084 5085
	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
		long long val = 0;

5086
		if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
5087
			continue;
5088 5089 5090 5091 5092 5093 5094 5095 5096 5097 5098 5099 5100 5101 5102 5103 5104 5105 5106 5107
		for_each_mem_cgroup_tree(mi, memcg)
			val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
		seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val);
	}

	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
		unsigned long long val = 0;

		for_each_mem_cgroup_tree(mi, memcg)
			val += mem_cgroup_read_events(mi, i);
		seq_printf(m, "total_%s %llu\n",
			   mem_cgroup_events_names[i], val);
	}

	for (i = 0; i < NR_LRU_LISTS; i++) {
		unsigned long long val = 0;

		for_each_mem_cgroup_tree(mi, memcg)
			val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
		seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
5108
	}
K
KAMEZAWA Hiroyuki 已提交
5109

K
KOSAKI Motohiro 已提交
5110 5111 5112 5113
#ifdef CONFIG_DEBUG_VM
	{
		int nid, zid;
		struct mem_cgroup_per_zone *mz;
5114
		struct zone_reclaim_stat *rstat;
K
KOSAKI Motohiro 已提交
5115 5116 5117 5118 5119
		unsigned long recent_rotated[2] = {0, 0};
		unsigned long recent_scanned[2] = {0, 0};

		for_each_online_node(nid)
			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
5120
				mz = mem_cgroup_zoneinfo(memcg, nid, zid);
5121
				rstat = &mz->lruvec.reclaim_stat;
K
KOSAKI Motohiro 已提交
5122

5123 5124 5125 5126
				recent_rotated[0] += rstat->recent_rotated[0];
				recent_rotated[1] += rstat->recent_rotated[1];
				recent_scanned[0] += rstat->recent_scanned[0];
				recent_scanned[1] += rstat->recent_scanned[1];
K
KOSAKI Motohiro 已提交
5127
			}
5128 5129 5130 5131
		seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
		seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
		seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
		seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
K
KOSAKI Motohiro 已提交
5132 5133 5134
	}
#endif

5135 5136 5137
	return 0;
}
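/*
 * Example excerpt of the memory.stat output produced above (figures
 * invented): plain keys such as "cache 1323008" are local to this
 * cgroup, while the matching "total_cache 2646016" sums the whole
 * subtree via for_each_mem_cgroup_tree().
 */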

static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	return mem_cgroup_swappiness(memcg);
}

static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(&memcg->css));

	if (val > 100 || !parent)
		return -EINVAL;

	mutex_lock(&memcg_create_mutex);

	/* If under hierarchy, only empty-root can set this value */
	if ((parent->use_hierarchy) || memcg_has_children(memcg)) {
		mutex_unlock(&memcg_create_mutex);
		return -EINVAL;
	}

	memcg->swappiness = val;

	mutex_unlock(&memcg_create_mutex);

	return 0;
}
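/*
 * Usage sketch from userspace (the path assumes a v1 memory hierarchy
 * mounted at /sys/fs/cgroup/memory; "mygroup" is a made-up cgroup):
 *
 *   echo 30 > /sys/fs/cgroup/memory/mygroup/memory.swappiness
 *
 * Values above 100 are rejected, as are writes once the group has
 * children under use_hierarchy, per the checks above.
 */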

static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
{
	struct mem_cgroup_threshold_ary *t;
	u64 usage;
	int i;

	rcu_read_lock();
	if (!swap)
		t = rcu_dereference(memcg->thresholds.primary);
	else
		t = rcu_dereference(memcg->memsw_thresholds.primary);

	if (!t)
		goto unlock;

	usage = mem_cgroup_usage(memcg, swap);

	/*
	 * current_threshold points to the threshold just below or equal to
	 * usage. If this is no longer true, a threshold was crossed after
	 * the last call of __mem_cgroup_threshold().
	 */
	i = t->current_threshold;

	/*
	 * Iterate backward over the array of thresholds starting from
	 * current_threshold and check if a threshold is crossed.
	 * If none of the thresholds below usage is crossed, we read
	 * only one element of the array here.
	 */
	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
		eventfd_signal(t->entries[i].eventfd, 1);

	/* i = current_threshold + 1 */
	i++;

	/*
	 * Iterate forward over the array of thresholds starting from
	 * current_threshold+1 and check if a threshold is crossed.
	 * If none of the thresholds above usage is crossed, we read
	 * only one element of the array here.
	 */
	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
		eventfd_signal(t->entries[i].eventfd, 1);

	/* Update current_threshold */
	t->current_threshold = i - 1;
unlock:
	rcu_read_unlock();
}
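/*
 * Worked example of the invariant above (numbers invented): with
 * thresholds {4M, 8M, 16M} and current_threshold at index 1 (8M), a
 * usage reading of 3M runs the backward loop, signalling the 8M and 4M
 * eventfds and leaving current_threshold at -1; a reading of 20M runs
 * the forward loop, signalling 16M and leaving current_threshold at 2.
 */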

static void mem_cgroup_threshold(struct mem_cgroup *memcg)
{
	while (memcg) {
		__mem_cgroup_threshold(memcg, false);
		if (do_swap_account)
			__mem_cgroup_threshold(memcg, true);

		memcg = parent_mem_cgroup(memcg);
	}
}

static int compare_thresholds(const void *a, const void *b)
{
	const struct mem_cgroup_threshold *_a = a;
	const struct mem_cgroup_threshold *_b = b;

	if (_a->threshold > _b->threshold)
		return 1;

	if (_a->threshold < _b->threshold)
		return -1;

	return 0;
}

static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
{
	struct mem_cgroup_eventfd_list *ev;

	list_for_each_entry(ev, &memcg->oom_notify, list)
		eventfd_signal(ev->eventfd, 1);
	return 0;
}

static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg)
		mem_cgroup_oom_notify_cb(iter);
}

static int mem_cgroup_usage_register_event(struct cgroup_subsys_state *css,
	struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct mem_cgroup_thresholds *thresholds;
	struct mem_cgroup_threshold_ary *new;
	enum res_type type = MEMFILE_TYPE(cft->private);
	u64 threshold, usage;
	int i, size, ret;

	ret = res_counter_memparse_write_strategy(args, &threshold);
	if (ret)
		return ret;

	mutex_lock(&memcg->thresholds_lock);

	if (type == _MEM)
		thresholds = &memcg->thresholds;
	else if (type == _MEMSWAP)
		thresholds = &memcg->memsw_thresholds;
	else
		BUG();

	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);

	/* Check if a threshold crossed before adding a new one */
	if (thresholds->primary)
		__mem_cgroup_threshold(memcg, type == _MEMSWAP);

	size = thresholds->primary ? thresholds->primary->size + 1 : 1;

	/* Allocate memory for new array of thresholds */
	new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
			GFP_KERNEL);
	if (!new) {
		ret = -ENOMEM;
		goto unlock;
	}
	new->size = size;

	/* Copy thresholds (if any) to new array */
	if (thresholds->primary) {
		memcpy(new->entries, thresholds->primary->entries, (size - 1) *
				sizeof(struct mem_cgroup_threshold));
	}

	/* Add new threshold */
	new->entries[size - 1].eventfd = eventfd;
	new->entries[size - 1].threshold = threshold;

	/* Sort thresholds. Registering of new threshold isn't time-critical */
	sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
			compare_thresholds, NULL);

	/* Find current threshold */
	new->current_threshold = -1;
	for (i = 0; i < size; i++) {
		if (new->entries[i].threshold <= usage) {
			/*
			 * new->current_threshold will not be used until
			 * rcu_assign_pointer(), so it's safe to increment
			 * it here.
			 */
			++new->current_threshold;
		} else
			break;
	}

	/* Free old spare buffer and save old primary buffer as spare */
	kfree(thresholds->spare);
	thresholds->spare = thresholds->primary;

	rcu_assign_pointer(thresholds->primary, new);

	/* To be sure that nobody uses thresholds */
	synchronize_rcu();

unlock:
	mutex_unlock(&memcg->thresholds_lock);

	return ret;
}
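/*
 * Userspace registration sketch for the handler above (cgroup v1
 * event_control ABI; file descriptors, paths and the 8M threshold are
 * illustrative):
 *
 *   int efd = eventfd(0, 0);
 *   int tfd = open("memory.usage_in_bytes", O_RDONLY);
 *   int cfd = open("cgroup.event_control", O_WRONLY);
 *   char buf[64];
 *   int len = snprintf(buf, sizeof(buf), "%d %d 8388608", efd, tfd);
 *   write(cfd, buf, len);
 *
 * From then on a read() of efd completes whenever usage crosses 8M in
 * either direction, via the eventfd_signal() calls issued above.
 */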

static void mem_cgroup_usage_unregister_event(struct cgroup_subsys_state *css,
	struct cftype *cft, struct eventfd_ctx *eventfd)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct mem_cgroup_thresholds *thresholds;
	struct mem_cgroup_threshold_ary *new;
	enum res_type type = MEMFILE_TYPE(cft->private);
	u64 usage;
	int i, j, size;

	mutex_lock(&memcg->thresholds_lock);
	if (type == _MEM)
		thresholds = &memcg->thresholds;
	else if (type == _MEMSWAP)
		thresholds = &memcg->memsw_thresholds;
	else
		BUG();

	if (!thresholds->primary)
		goto unlock;

	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);

	/* Check if a threshold crossed before removing */
	__mem_cgroup_threshold(memcg, type == _MEMSWAP);

	/* Calculate the new number of thresholds */
	size = 0;
	for (i = 0; i < thresholds->primary->size; i++) {
		if (thresholds->primary->entries[i].eventfd != eventfd)
			size++;
	}

	new = thresholds->spare;

	/* Set thresholds array to NULL if we don't have thresholds */
	if (!size) {
		kfree(new);
		new = NULL;
		goto swap_buffers;
	}

	new->size = size;

	/* Copy thresholds and find current threshold */
	new->current_threshold = -1;
	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
		if (thresholds->primary->entries[i].eventfd == eventfd)
			continue;

		new->entries[j] = thresholds->primary->entries[i];
		if (new->entries[j].threshold <= usage) {
			/*
			 * new->current_threshold will not be used
			 * until rcu_assign_pointer(), so it's safe to increment
			 * it here.
			 */
			++new->current_threshold;
		}
		j++;
	}

swap_buffers:
	/* Swap primary and spare array */
	thresholds->spare = thresholds->primary;
	/* If all events are unregistered, free the spare array */
	if (!new) {
		kfree(thresholds->spare);
		thresholds->spare = NULL;
	}

	rcu_assign_pointer(thresholds->primary, new);

	/* To be sure that nobody uses thresholds */
	synchronize_rcu();
unlock:
	mutex_unlock(&memcg->thresholds_lock);
}

static int mem_cgroup_oom_register_event(struct cgroup_subsys_state *css,
	struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct mem_cgroup_eventfd_list *event;
	enum res_type type = MEMFILE_TYPE(cft->private);

	BUG_ON(type != _OOM_TYPE);
	event = kmalloc(sizeof(*event),	GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	spin_lock(&memcg_oom_lock);

	event->eventfd = eventfd;
	list_add(&event->list, &memcg->oom_notify);

	/* already in OOM ? */
	if (atomic_read(&memcg->under_oom))
		eventfd_signal(eventfd, 1);
	spin_unlock(&memcg_oom_lock);

	return 0;
}
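/*
 * Registration mirrors the usage thresholds above: userspace writes
 * "<eventfd fd> <fd of memory.oom_control>" to cgroup.event_control
 * and the eventfd then fires on every OOM event in the group (an
 * informal description of the v1 ABI, not a verbatim transcript).
 */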

static void mem_cgroup_oom_unregister_event(struct cgroup_subsys_state *css,
	struct cftype *cft, struct eventfd_ctx *eventfd)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct mem_cgroup_eventfd_list *ev, *tmp;
	enum res_type type = MEMFILE_TYPE(cft->private);

	BUG_ON(type != _OOM_TYPE);

	spin_lock(&memcg_oom_lock);

	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
		if (ev->eventfd == eventfd) {
			list_del(&ev->list);
			kfree(ev);
		}
	}

	spin_unlock(&memcg_oom_lock);
}

static int mem_cgroup_oom_control_read(struct cgroup_subsys_state *css,
	struct cftype *cft,  struct cgroup_map_cb *cb)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	cb->fill(cb, "oom_kill_disable", memcg->oom_kill_disable);

	if (atomic_read(&memcg->under_oom))
		cb->fill(cb, "under_oom", 1);
	else
		cb->fill(cb, "under_oom", 0);
	return 0;
}

static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
	struct cftype *cft, u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(&memcg->css));

	/* cannot set to root cgroup and only 0 and 1 are allowed */
	if (!parent || !((val == 0) || (val == 1)))
		return -EINVAL;

	mutex_lock(&memcg_create_mutex);
	/* oom-kill-disable is a flag for subhierarchy. */
	if ((parent->use_hierarchy) || memcg_has_children(memcg)) {
		mutex_unlock(&memcg_create_mutex);
		return -EINVAL;
	}
	memcg->oom_kill_disable = val;
	if (!val)
		memcg_oom_recover(memcg);
	mutex_unlock(&memcg_create_mutex);
	return 0;
}
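/*
 * Usage sketch: "echo 1 > memory.oom_control" disables the OOM killer
 * for the group, leaving allocating tasks blocked until the limit is
 * raised or memory is freed; "echo 0" re-enables it and, through
 * memcg_oom_recover() above, wakes any tasks already waiting.
 */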

#ifdef CONFIG_MEMCG_KMEM
static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
	int ret;

	memcg->kmemcg_id = -1;
	ret = memcg_propagate_kmem(memcg);
	if (ret)
		return ret;

	return mem_cgroup_sockets_init(memcg, ss);
}

static void memcg_destroy_kmem(struct mem_cgroup *memcg)
{
	mem_cgroup_sockets_destroy(memcg);
}

static void kmem_cgroup_css_offline(struct mem_cgroup *memcg)
{
	if (!memcg_kmem_is_active(memcg))
		return;

	/*
	 * kmem charges can outlive the cgroup. In the case of slab
	 * pages, for instance, a page can contain objects from various
	 * processes. As we prevent from taking a reference for every
	 * such allocation we have to be careful when doing uncharge
	 * (see memcg_uncharge_kmem) and here during offlining.
	 *
	 * The idea is that only the _last_ uncharge which sees
	 * the dead memcg will drop the last reference. An additional
	 * reference is taken here before the group is marked dead
	 * which is then paired with css_put during uncharge resp. here.
	 *
	 * Although this might sound strange as this path is called from
	 * css_offline() when the reference might have dropped down to 0
	 * and shouldn't be incremented anymore (css_tryget would fail)
	 * we do not have other options because of the kmem allocations
	 * lifetime.
	 */
	css_get(&memcg->css);

	memcg_kmem_mark_dead(memcg);

	if (res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0)
		return;

	if (memcg_kmem_test_and_clear_dead(memcg))
		css_put(&memcg->css);
}
#else
static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
	return 0;
}

static void memcg_destroy_kmem(struct mem_cgroup *memcg)
{
}

static void kmem_cgroup_css_offline(struct mem_cgroup *memcg)
{
}
#endif

static struct cftype mem_cgroup_files[] = {
	{
		.name = "usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
		.read = mem_cgroup_read,
		.register_event = mem_cgroup_usage_register_event,
		.unregister_event = mem_cgroup_usage_unregister_event,
	},
	{
		.name = "max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
		.trigger = mem_cgroup_reset,
		.read = mem_cgroup_read,
	},
	{
		.name = "limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
		.write_string = mem_cgroup_write,
		.read = mem_cgroup_read,
	},
	{
		.name = "soft_limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
		.write_string = mem_cgroup_write,
		.read = mem_cgroup_read,
	},
	{
		.name = "failcnt",
		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
		.trigger = mem_cgroup_reset,
		.read = mem_cgroup_read,
	},
	{
		.name = "stat",
		.read_seq_string = memcg_stat_show,
	},
	{
		.name = "force_empty",
		.trigger = mem_cgroup_force_empty_write,
	},
	{
		.name = "use_hierarchy",
		.flags = CFTYPE_INSANE,
		.write_u64 = mem_cgroup_hierarchy_write,
		.read_u64 = mem_cgroup_hierarchy_read,
	},
	{
		.name = "swappiness",
		.read_u64 = mem_cgroup_swappiness_read,
		.write_u64 = mem_cgroup_swappiness_write,
	},
	{
		.name = "move_charge_at_immigrate",
		.read_u64 = mem_cgroup_move_charge_read,
		.write_u64 = mem_cgroup_move_charge_write,
	},
	{
		.name = "oom_control",
		.read_map = mem_cgroup_oom_control_read,
		.write_u64 = mem_cgroup_oom_control_write,
		.register_event = mem_cgroup_oom_register_event,
		.unregister_event = mem_cgroup_oom_unregister_event,
		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
	},
	{
		.name = "pressure_level",
		.register_event = vmpressure_register_event,
		.unregister_event = vmpressure_unregister_event,
	},
#ifdef CONFIG_NUMA
	{
		.name = "numa_stat",
		.read_seq_string = memcg_numa_stat_show,
	},
#endif
#ifdef CONFIG_MEMCG_KMEM
	{
		.name = "kmem.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
		.write_string = mem_cgroup_write,
		.read = mem_cgroup_read,
	},
	{
		.name = "kmem.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
		.read = mem_cgroup_read,
	},
	{
		.name = "kmem.failcnt",
		.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
		.trigger = mem_cgroup_reset,
		.read = mem_cgroup_read,
	},
	{
		.name = "kmem.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
		.trigger = mem_cgroup_reset,
		.read = mem_cgroup_read,
	},
#ifdef CONFIG_SLABINFO
	{
		.name = "kmem.slabinfo",
		.read_seq_string = mem_cgroup_slabinfo_read,
	},
#endif
#endif
	{ },	/* terminate */
};

#ifdef CONFIG_MEMCG_SWAP
static struct cftype memsw_cgroup_files[] = {
	{
		.name = "memsw.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
		.read = mem_cgroup_read,
		.register_event = mem_cgroup_usage_register_event,
		.unregister_event = mem_cgroup_usage_unregister_event,
	},
	{
		.name = "memsw.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
		.trigger = mem_cgroup_reset,
		.read = mem_cgroup_read,
	},
	{
		.name = "memsw.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
		.write_string = mem_cgroup_write,
		.read = mem_cgroup_read,
	},
	{
		.name = "memsw.failcnt",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
		.trigger = mem_cgroup_reset,
		.read = mem_cgroup_read,
	},
	{ },	/* terminate */
};
#endif
static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
{
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup_per_zone *mz;
	int zone, tmp = node;
	/*
	 * This routine is called against possible nodes.
	 * But it's BUG to call kmalloc() against offline node.
	 *
	 * TODO: this routine can waste much memory for nodes which will
	 *       never be onlined. It's better to use memory hotplug callback
	 *       function.
	 */
	if (!node_state(node, N_NORMAL_MEMORY))
		tmp = -1;
	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
	if (!pn)
		return 1;

	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
		mz = &pn->zoneinfo[zone];
		lruvec_init(&mz->lruvec);
		mz->memcg = memcg;
	}
	memcg->nodeinfo[node] = pn;
	return 0;
}

static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
{
	kfree(memcg->nodeinfo[node]);
}

static struct mem_cgroup *mem_cgroup_alloc(void)
{
	struct mem_cgroup *memcg;
	size_t size = memcg_size();

	/* Can be very big if nr_node_ids is very big */
	if (size < PAGE_SIZE)
		memcg = kzalloc(size, GFP_KERNEL);
	else
		memcg = vzalloc(size);

	if (!memcg)
		return NULL;

	memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
	if (!memcg->stat)
		goto out_free;
	spin_lock_init(&memcg->pcp_counter_lock);
	return memcg;

out_free:
	if (size < PAGE_SIZE)
		kfree(memcg);
	else
		vfree(memcg);
	return NULL;
}

/*
 * When a mem_cgroup is destroyed, references from swap_cgroup can remain.
 * (scanning everything at force_empty is too costly...)
 *
 * Instead of clearing all references at force_empty, we remember
 * the number of references from swap_cgroup and free the mem_cgroup when
 * it goes down to 0.
 *
 * Removal of the cgroup itself succeeds regardless of refs from swap.
 */

static void __mem_cgroup_free(struct mem_cgroup *memcg)
{
	int node;
	size_t size = memcg_size();

	free_css_id(&mem_cgroup_subsys, &memcg->css);

	for_each_node(node)
		free_mem_cgroup_per_zone_info(memcg, node);

	free_percpu(memcg->stat);

	/*
	 * We need to make sure that (at least for now), the jump label
	 * destruction code runs outside of the cgroup lock. This is because
	 * get_online_cpus(), which is called from the static_branch update,
	 * can't be called inside the cgroup_lock. cpusets are the ones
	 * enforcing this dependency, so if they ever change, we might as well.
	 *
	 * schedule_work() will guarantee this happens. Be careful if you need
	 * to move this code around, and make sure it is outside
	 * the cgroup_lock.
	 */
	disarm_static_keys(memcg);
	if (size < PAGE_SIZE)
		kfree(memcg);
	else
		vfree(memcg);
}

/*
 * Returns the parent mem_cgroup in the memcg hierarchy, with hierarchy
 * enabled.
 */
struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg->res.parent)
		return NULL;
	return mem_cgroup_from_res_counter(memcg->res.parent, res);
}
EXPORT_SYMBOL(parent_mem_cgroup);

static struct cgroup_subsys_state * __ref
mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct mem_cgroup *memcg;
	long error = -ENOMEM;
	int node;

	memcg = mem_cgroup_alloc();
	if (!memcg)
		return ERR_PTR(error);

	for_each_node(node)
		if (alloc_mem_cgroup_per_zone_info(memcg, node))
			goto free_out;

	/* root ? */
	if (parent_css == NULL) {
		root_mem_cgroup = memcg;
		res_counter_init(&memcg->res, NULL);
		res_counter_init(&memcg->memsw, NULL);
		res_counter_init(&memcg->kmem, NULL);
	}

	memcg->last_scanned_node = MAX_NUMNODES;
	INIT_LIST_HEAD(&memcg->oom_notify);
	memcg->move_charge_at_immigrate = 0;
	mutex_init(&memcg->thresholds_lock);
	spin_lock_init(&memcg->move_lock);
	vmpressure_init(&memcg->vmpressure);

	return &memcg->css;

free_out:
	__mem_cgroup_free(memcg);
	return ERR_PTR(error);
}

static int
mem_cgroup_css_online(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(css));
	int error = 0;

	if (!parent)
		return 0;

	mutex_lock(&memcg_create_mutex);

	memcg->use_hierarchy = parent->use_hierarchy;
	memcg->oom_kill_disable = parent->oom_kill_disable;
	memcg->swappiness = mem_cgroup_swappiness(parent);

	if (parent->use_hierarchy) {
		res_counter_init(&memcg->res, &parent->res);
		res_counter_init(&memcg->memsw, &parent->memsw);
		res_counter_init(&memcg->kmem, &parent->kmem);

		/*
		 * No need to take a reference to the parent because cgroup
		 * core guarantees its existence.
		 */
	} else {
		res_counter_init(&memcg->res, NULL);
		res_counter_init(&memcg->memsw, NULL);
		res_counter_init(&memcg->kmem, NULL);
		/*
		 * Deeper hierarchy with use_hierarchy == false doesn't make
		 * much sense so let cgroup subsystem know about this
		 * unfortunate state in our controller.
		 */
		if (parent != root_mem_cgroup)
			mem_cgroup_subsys.broken_hierarchy = true;
	}

	error = memcg_init_kmem(memcg, &mem_cgroup_subsys);
	mutex_unlock(&memcg_create_mutex);
	return error;
}

/*
 * Announce to all parents that a group from their hierarchy is gone.
 */
static void mem_cgroup_invalidate_reclaim_iterators(struct mem_cgroup *memcg)
{
	struct mem_cgroup *parent = memcg;

	while ((parent = parent_mem_cgroup(parent)))
		mem_cgroup_iter_invalidate(parent);

	/*
	 * if the root memcg is not hierarchical we have to check it
	 * explicitly.
	 */
	if (!root_mem_cgroup->use_hierarchy)
		mem_cgroup_iter_invalidate(root_mem_cgroup);
}

static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	kmem_cgroup_css_offline(memcg);

	mem_cgroup_invalidate_reclaim_iterators(memcg);
	mem_cgroup_reparent_charges(memcg);
	mem_cgroup_destroy_all_caches(memcg);
	vmpressure_cleanup(&memcg->vmpressure);
}

static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	memcg_destroy_kmem(memcg);
	__mem_cgroup_free(memcg);
}

#ifdef CONFIG_MMU
/* Handlers for move charge at task migration. */
#define PRECHARGE_COUNT_AT_ONCE	256
static int mem_cgroup_do_precharge(unsigned long count)
{
	int ret = 0;
	int batch_count = PRECHARGE_COUNT_AT_ONCE;
	struct mem_cgroup *memcg = mc.to;

	if (mem_cgroup_is_root(memcg)) {
		mc.precharge += count;
		/* we don't need css_get for root */
		return ret;
	}
	/* try to charge at once */
	if (count > 1) {
		struct res_counter *dummy;
		/*
		 * "memcg" cannot be under rmdir() because we've already checked
		 * by cgroup_lock_live_cgroup() that it is not removed and we
		 * are still under the same cgroup_mutex. So we can postpone
		 * css_get().
		 */
		if (res_counter_charge(&memcg->res, PAGE_SIZE * count, &dummy))
			goto one_by_one;
		if (do_swap_account && res_counter_charge(&memcg->memsw,
						PAGE_SIZE * count, &dummy)) {
			res_counter_uncharge(&memcg->res, PAGE_SIZE * count);
			goto one_by_one;
		}
		mc.precharge += count;
		return ret;
	}
one_by_one:
	/* fall back to one by one charge */
	while (count--) {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		if (!batch_count--) {
			batch_count = PRECHARGE_COUNT_AT_ONCE;
			cond_resched();
		}
		ret = __mem_cgroup_try_charge(NULL,
					GFP_KERNEL, 1, &memcg, false);
		if (ret)
			/* mem_cgroup_clear_mc() will do uncharge later */
			return ret;
		mc.precharge++;
	}
	return ret;
}

/**
 * get_mctgt_type - get target type of moving charge
 * @vma: the vma the pte to be checked belongs
 * @addr: the address corresponding to the pte to be checked
 * @ptent: the pte to be checked
 * @target: the pointer where the target page or swap entry will be
 *          stored (can be NULL)
 *
 * Returns
 *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
 *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
 *     move charge. if @target is not NULL, the page is stored in target->page
 *     with extra refcnt got(Callers should handle it).
 *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
 *     target for charge migration. if @target is not NULL, the entry is stored
 *     in target->ent.
 *
 * Called with pte lock held.
 */
union mc_target {
	struct page	*page;
	swp_entry_t	ent;
};

enum mc_target_type {
	MC_TARGET_NONE = 0,
	MC_TARGET_PAGE,
	MC_TARGET_SWAP,
};

static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
						unsigned long addr, pte_t ptent)
{
	struct page *page = vm_normal_page(vma, addr, ptent);

	if (!page || !page_mapped(page))
		return NULL;
	if (PageAnon(page)) {
		/* we don't move shared anon */
		if (!move_anon())
			return NULL;
	} else if (!move_file())
		/* we ignore mapcount for file pages */
		return NULL;
	if (!get_page_unless_zero(page))
		return NULL;

	return page;
}

#ifdef CONFIG_SWAP
static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	struct page *page = NULL;
	swp_entry_t ent = pte_to_swp_entry(ptent);

	if (!move_anon() || non_swap_entry(ent))
		return NULL;
	/*
	 * Because lookup_swap_cache() updates some statistics counter,
	 * we call find_get_page() with swapper_space directly.
	 */
	page = find_get_page(swap_address_space(ent), ent.val);
	if (do_swap_account)
		entry->val = ent.val;

	return page;
}
#else
static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	return NULL;
}
#endif

static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	struct page *page = NULL;
	struct address_space *mapping;
	pgoff_t pgoff;

	if (!vma->vm_file) /* anonymous vma */
		return NULL;
	if (!move_file())
		return NULL;

	mapping = vma->vm_file->f_mapping;
	if (pte_none(ptent))
		pgoff = linear_page_index(vma, addr);
	else /* pte_file(ptent) is true */
		pgoff = pte_to_pgoff(ptent);

	/* page is moved even if it's not RSS of this task(page-faulted). */
	page = find_get_page(mapping, pgoff);

#ifdef CONFIG_SWAP
	/* shmem/tmpfs may report page out on swap: account for that too. */
	if (radix_tree_exceptional_entry(page)) {
		swp_entry_t swap = radix_to_swp_entry(page);
		if (do_swap_account)
			*entry = swap;
		page = find_get_page(swap_address_space(swap), swap.val);
	}
#endif
	return page;
}

static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
		unsigned long addr, pte_t ptent, union mc_target *target)
{
	struct page *page = NULL;
	struct page_cgroup *pc;
	enum mc_target_type ret = MC_TARGET_NONE;
	swp_entry_t ent = { .val = 0 };

	if (pte_present(ptent))
		page = mc_handle_present_pte(vma, addr, ptent);
	else if (is_swap_pte(ptent))
		page = mc_handle_swap_pte(vma, addr, ptent, &ent);
	else if (pte_none(ptent) || pte_file(ptent))
		page = mc_handle_file_pte(vma, addr, ptent, &ent);

	if (!page && !ent.val)
		return ret;
	if (page) {
		pc = lookup_page_cgroup(page);
		/*
		 * Do only loose check w/o page_cgroup lock.
		 * mem_cgroup_move_account() checks the pc is valid or not under
		 * the lock.
		 */
		if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
			ret = MC_TARGET_PAGE;
			if (target)
				target->page = page;
		}
		if (!ret || !target)
			put_page(page);
	}
	/* There is a swap entry and a page doesn't exist or isn't charged */
	if (ent.val && !ret &&
			css_id(&mc.from->css) == lookup_swap_cgroup_id(ent)) {
		ret = MC_TARGET_SWAP;
		if (target)
			target->ent = ent;
	}
	return ret;
}
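/*
 * Informal decision summary for get_mctgt_type():
 *
 *   pte state              lookup helper            possible result
 *   present                mc_handle_present_pte()  MC_TARGET_PAGE
 *   swap entry             mc_handle_swap_pte()     MC_TARGET_PAGE/SWAP
 *   none or file           mc_handle_file_pte()     MC_TARGET_PAGE/SWAP
 *
 * In every case the page must be charged to mc.from (or, for a swap
 * entry, owned by mc.from's css id) or MC_TARGET_NONE is returned.
 */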

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * We don't consider swapping or file mapped pages because THP does not
 * support them for now.
 * Caller should make sure that pmd_trans_huge(pmd) is true.
 */
static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
		unsigned long addr, pmd_t pmd, union mc_target *target)
{
	struct page *page = NULL;
	struct page_cgroup *pc;
	enum mc_target_type ret = MC_TARGET_NONE;

	page = pmd_page(pmd);
	VM_BUG_ON(!page || !PageHead(page));
	if (!move_anon())
		return ret;
	pc = lookup_page_cgroup(page);
	if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
		ret = MC_TARGET_PAGE;
		if (target) {
			get_page(page);
			target->page = page;
		}
	}
	return ret;
}
#else
static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
		unsigned long addr, pmd_t pmd, union mc_target *target)
{
	return MC_TARGET_NONE;
}
#endif

static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
					unsigned long addr, unsigned long end,
					struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_trans_huge_lock(pmd, vma) == 1) {
		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
			mc.precharge += HPAGE_PMD_NR;
		spin_unlock(&vma->vm_mm->page_table_lock);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		if (get_mctgt_type(vma, addr, *pte, NULL))
			mc.precharge++;	/* increment precharge temporarily */
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	return 0;
}

static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
{
	unsigned long precharge;
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		struct mm_walk mem_cgroup_count_precharge_walk = {
			.pmd_entry = mem_cgroup_count_precharge_pte_range,
			.mm = mm,
			.private = vma,
		};
		if (is_vm_hugetlb_page(vma))
			continue;
		walk_page_range(vma->vm_start, vma->vm_end,
					&mem_cgroup_count_precharge_walk);
	}
	up_read(&mm->mmap_sem);

	precharge = mc.precharge;
	mc.precharge = 0;

	return precharge;
}

static int mem_cgroup_precharge_mc(struct mm_struct *mm)
{
	unsigned long precharge = mem_cgroup_count_precharge(mm);

	VM_BUG_ON(mc.moving_task);
	mc.moving_task = current;
	return mem_cgroup_do_precharge(precharge);
}
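/*
 * Sketch of the flow (numbers invented): moving a task whose mm maps
 * 300 pages first tries to charge all 300 to mc.to with a single
 * res_counter_charge() batch in mem_cgroup_do_precharge(); if that
 * fails it falls back to charging page by page, and any leftover
 * precharge is returned by __mem_cgroup_clear_mc().
 */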

/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
static void __mem_cgroup_clear_mc(void)
{
	struct mem_cgroup *from = mc.from;
	struct mem_cgroup *to = mc.to;
	int i;

	/* we must uncharge all the leftover precharges from mc.to */
	if (mc.precharge) {
		__mem_cgroup_cancel_charge(mc.to, mc.precharge);
		mc.precharge = 0;
	}
	/*
	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
	 * we must uncharge here.
	 */
	if (mc.moved_charge) {
		__mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
		mc.moved_charge = 0;
	}
	/* we must fixup refcnts and charges */
	if (mc.moved_swap) {
		/* uncharge swap account from the old cgroup */
		if (!mem_cgroup_is_root(mc.from))
			res_counter_uncharge(&mc.from->memsw,
						PAGE_SIZE * mc.moved_swap);

		for (i = 0; i < mc.moved_swap; i++)
			css_put(&mc.from->css);

		if (!mem_cgroup_is_root(mc.to)) {
			/*
			 * we charged both to->res and to->memsw, so we should
			 * uncharge to->res.
			 */
			res_counter_uncharge(&mc.to->res,
						PAGE_SIZE * mc.moved_swap);
		}
		/* we've already done css_get(mc.to) */
		mc.moved_swap = 0;
	}
	memcg_oom_recover(from);
	memcg_oom_recover(to);
	wake_up_all(&mc.waitq);
}

static void mem_cgroup_clear_mc(void)
{
	struct mem_cgroup *from = mc.from;

	/*
	 * we must clear moving_task before waking up waiters at the end of
	 * task migration.
	 */
	mc.moving_task = NULL;
	__mem_cgroup_clear_mc();
	spin_lock(&mc.lock);
	mc.from = NULL;
	mc.to = NULL;
	spin_unlock(&mc.lock);
	mem_cgroup_end_move(from);
}

static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
				 struct cgroup_taskset *tset)
{
	struct task_struct *p = cgroup_taskset_first(tset);
	int ret = 0;
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	unsigned long move_charge_at_immigrate;

	/*
	 * We are now committed to this value whatever it is. Changes in this
	 * tunable will only affect upcoming migrations, not the current one.
	 * So we need to save it, and keep it going.
	 */
	move_charge_at_immigrate  = memcg->move_charge_at_immigrate;
	if (move_charge_at_immigrate) {
		struct mm_struct *mm;
		struct mem_cgroup *from = mem_cgroup_from_task(p);

		VM_BUG_ON(from == memcg);

		mm = get_task_mm(p);
		if (!mm)
			return 0;
		/* We move charges only when we move an owner of the mm */
		if (mm->owner == p) {
			VM_BUG_ON(mc.from);
			VM_BUG_ON(mc.to);
			VM_BUG_ON(mc.precharge);
			VM_BUG_ON(mc.moved_charge);
			VM_BUG_ON(mc.moved_swap);
			mem_cgroup_start_move(from);
			spin_lock(&mc.lock);
			mc.from = from;
			mc.to = memcg;
			mc.immigrate_flags = move_charge_at_immigrate;
			spin_unlock(&mc.lock);
			/* We set mc.moving_task later */

			ret = mem_cgroup_precharge_mc(mm);
			if (ret)
				mem_cgroup_clear_mc();
		}
		mmput(mm);
	}
	return ret;
}

static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
				     struct cgroup_taskset *tset)
{
	mem_cgroup_clear_mc();
}

static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mm_walk *walk)
{
	int ret = 0;
	struct vm_area_struct *vma = walk->private;
	pte_t *pte;
	spinlock_t *ptl;
	enum mc_target_type target_type;
	union mc_target target;
	struct page *page;
	struct page_cgroup *pc;

	/*
	 * We don't take compound_lock() here but no race with splitting thp
	 * happens because:
	 *  - if pmd_trans_huge_lock() returns 1, the relevant thp is not
	 *    under splitting, which means there's no concurrent thp split,
	 *  - if another thread runs into split_huge_page() just after we
	 *    entered this if-block, the thread must wait for page table lock
	 *    to be unlocked in __split_huge_page_splitting(), where the main
	 *    part of thp split is not executed yet.
	 */
	if (pmd_trans_huge_lock(pmd, vma) == 1) {
		if (mc.precharge < HPAGE_PMD_NR) {
			spin_unlock(&vma->vm_mm->page_table_lock);
			return 0;
		}
		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
		if (target_type == MC_TARGET_PAGE) {
			page = target.page;
			if (!isolate_lru_page(page)) {
				pc = lookup_page_cgroup(page);
				if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
							pc, mc.from, mc.to)) {
					mc.precharge -= HPAGE_PMD_NR;
					mc.moved_charge += HPAGE_PMD_NR;
				}
				putback_lru_page(page);
			}
			put_page(page);
		}
		spin_unlock(&vma->vm_mm->page_table_lock);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
retry:
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; addr += PAGE_SIZE) {
		pte_t ptent = *(pte++);
		swp_entry_t ent;

		if (!mc.precharge)
			break;

		switch (get_mctgt_type(vma, addr, ptent, &target)) {
		case MC_TARGET_PAGE:
			page = target.page;
			if (isolate_lru_page(page))
				goto put;
			pc = lookup_page_cgroup(page);
			if (!mem_cgroup_move_account(page, 1, pc,
						     mc.from, mc.to)) {
				mc.precharge--;
				/* we uncharge from mc.from later. */
				mc.moved_charge++;
			}
			putback_lru_page(page);
put:			/* get_mctgt_type() gets the page */
			put_page(page);
			break;
		case MC_TARGET_SWAP:
			ent = target.ent;
			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
				mc.precharge--;
				/* we fixup refcnts and charges later. */
				mc.moved_swap++;
			}
			break;
		default:
			break;
		}
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	if (addr != end) {
		/*
		 * We have consumed all precharges we got in can_attach().
		 * We try charge one by one, but don't do any additional
		 * charges to mc.to if we have failed in charge once in attach()
		 * phase.
		 */
		ret = mem_cgroup_do_precharge(1);
		if (!ret)
			goto retry;
	}

	return ret;
}

static void mem_cgroup_move_charge(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	lru_add_drain_all();
retry:
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		/*
		 * Someone who is holding the mmap_sem might be waiting in
		 * waitq. So we cancel all extra charges, wake up all waiters,
		 * and retry. Because we cancel precharges, we might not be able
		 * to move enough charges, but moving charge is a best-effort
		 * feature anyway, so it wouldn't be a big problem.
		 */
		__mem_cgroup_clear_mc();
		cond_resched();
		goto retry;
	}
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		int ret;
		struct mm_walk mem_cgroup_move_charge_walk = {
			.pmd_entry = mem_cgroup_move_charge_pte_range,
			.mm = mm,
			.private = vma,
		};
		if (is_vm_hugetlb_page(vma))
			continue;
		ret = walk_page_range(vma->vm_start, vma->vm_end,
						&mem_cgroup_move_charge_walk);
		if (ret)
			/*
			 * means we have consumed all precharges and failed in
			 * doing additional charge. Just abandon here.
			 */
			break;
	}
	up_read(&mm->mmap_sem);
}

static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
				 struct cgroup_taskset *tset)
{
	struct task_struct *p = cgroup_taskset_first(tset);
	struct mm_struct *mm = get_task_mm(p);

	if (mm) {
		if (mc.to)
			mem_cgroup_move_charge(mm);
		mmput(mm);
	}
	if (mc.to)
		mem_cgroup_clear_mc();
}
#else	/* !CONFIG_MMU */
static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
				 struct cgroup_taskset *tset)
{
	return 0;
}
static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
				     struct cgroup_taskset *tset)
{
}
static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
				 struct cgroup_taskset *tset)
{
}
#endif

/*
 * Cgroup retains root cgroups across [un]mount cycles making it necessary
 * to verify sane_behavior flag on each mount attempt.
 */
static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
{
	/*
	 * use_hierarchy is forced with sane_behavior.  cgroup core
	 * guarantees that @root doesn't have any children, so turning it
	 * on for the root memcg is enough.
	 */
	if (cgroup_sane_behavior(root_css->cgroup))
		mem_cgroup_from_css(root_css)->use_hierarchy = true;
}

struct cgroup_subsys mem_cgroup_subsys = {
	.name = "memory",
	.subsys_id = mem_cgroup_subsys_id,
	.css_alloc = mem_cgroup_css_alloc,
	.css_online = mem_cgroup_css_online,
	.css_offline = mem_cgroup_css_offline,
	.css_free = mem_cgroup_css_free,
	.can_attach = mem_cgroup_can_attach,
	.cancel_attach = mem_cgroup_cancel_attach,
	.attach = mem_cgroup_move_task,
	.bind = mem_cgroup_bind,
	.base_cftypes = mem_cgroup_files,
	.early_init = 0,
	.use_id = 1,
};

#ifdef CONFIG_MEMCG_SWAP
static int __init enable_swap_account(char *s)
{
	if (!strcmp(s, "1"))
		really_do_swap_account = 1;
	else if (!strcmp(s, "0"))
		really_do_swap_account = 0;
	return 1;
}
__setup("swapaccount=", enable_swap_account);
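/*
 * Boot-time usage: "swapaccount=0" on the kernel command line keeps
 * swap accounting compiled in but disabled, while "swapaccount=1"
 * forces it on regardless of CONFIG_MEMCG_SWAP_ENABLED.
 */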

static void __init memsw_file_init(void)
{
	WARN_ON(cgroup_add_cftypes(&mem_cgroup_subsys, memsw_cgroup_files));
}

static void __init enable_swap_cgroup(void)
{
	if (!mem_cgroup_disabled() && really_do_swap_account) {
		do_swap_account = 1;
		memsw_file_init();
	}
}

#else
static void __init enable_swap_cgroup(void)
{
}
#endif

/*
 * subsys_initcall() for memory controller.
 *
 * Some parts like hotcpu_notifier() have to be initialized from this context
 * because of lock dependencies (cgroup_lock -> cpu hotplug) but basically
 * everything that doesn't depend on a specific mem_cgroup structure should
 * be initialized from here.
 */
static int __init mem_cgroup_init(void)
{
	hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
	enable_swap_cgroup();
	memcg_stock_init();
	return 0;
}
subsys_initcall(mem_cgroup_init);