memcontrol.c 154.5 KB
Newer Older
B
Balbir Singh 已提交
1 2 3 4 5
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
6 7 8
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
9 10 11 12
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
13 14 15 16
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
17 18 19 20 21 22
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 *
B
Balbir Singh 已提交
23 24 25 26 27 28 29 30 31 32 33
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

34
#include <linux/page_counter.h>
B
Balbir Singh 已提交
35 36
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
37
#include <linux/mm.h>
38
#include <linux/hugetlb.h>
K
KAMEZAWA Hiroyuki 已提交
39
#include <linux/pagemap.h>
40
#include <linux/smp.h>
41
#include <linux/page-flags.h>
42
#include <linux/backing-dev.h>
43 44
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
45
#include <linux/limits.h>
46
#include <linux/export.h>
47
#include <linux/mutex.h>
48
#include <linux/rbtree.h>
49
#include <linux/slab.h>
50
#include <linux/swap.h>
51
#include <linux/swapops.h>
52
#include <linux/spinlock.h>
53
#include <linux/eventfd.h>
54
#include <linux/poll.h>
55
#include <linux/sort.h>
56
#include <linux/fs.h>
57
#include <linux/seq_file.h>
58
#include <linux/vmpressure.h>
59
#include <linux/mm_inline.h>
60
#include <linux/swap_cgroup.h>
61
#include <linux/cpu.h>
62
#include <linux/oom.h>
63
#include <linux/lockdep.h>
64
#include <linux/file.h>
65
#include <linux/tracehook.h>
K
KAMEZAWA Hiroyuki 已提交
66
#include "internal.h"
G
Glauber Costa 已提交
67
#include <net/sock.h>
M
Michal Hocko 已提交
68
#include <net/ip.h>
69
#include "slab.h"
B
Balbir Singh 已提交
70

71
#include <linux/uaccess.h>
72

73 74
#include <trace/events/vmscan.h>

75 76
struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);
77

78 79
struct mem_cgroup *root_mem_cgroup __read_mostly;

80
#define MEM_CGROUP_RECLAIM_RETRIES	5
B
Balbir Singh 已提交
81

82 83 84
/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket;

85 86 87
/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem;

88
/* Whether the swap controller is active */
A
Andrew Morton 已提交
89
#ifdef CONFIG_MEMCG_SWAP
90 91
int do_swap_account __read_mostly;
#else
92
#define do_swap_account		0
93 94
#endif

95 96 97 98 99 100
/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
}

101 102 103
static const char * const mem_cgroup_stat_names[] = {
	"cache",
	"rss",
104
	"rss_huge",
105
	"mapped_file",
106
	"dirty",
107
	"writeback",
108 109 110 111 112 113 114 115 116 117
	"swap",
};

static const char * const mem_cgroup_events_names[] = {
	"pgpgin",
	"pgpgout",
	"pgfault",
	"pgmajfault",
};

118 119 120 121 122 123 124 125
static const char * const mem_cgroup_lru_names[] = {
	"inactive_anon",
	"active_anon",
	"inactive_file",
	"active_file",
	"unevictable",
};

126 127 128
#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024
#define NUMAINFO_EVENTS_TARGET	1024
129

130 131 132 133 134
/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

135
struct mem_cgroup_tree_per_node {
136 137 138 139 140 141 142 143 144 145
	struct rb_root rb_root;
	spinlock_t lock;
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

K
KAMEZAWA Hiroyuki 已提交
146 147 148 149 150
/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};
151

152 153 154
/*
 * cgroup_event represents events which userspace want to receive.
 */
155
struct mem_cgroup_event {
156
	/*
157
	 * memcg which the event belongs to.
158
	 */
159
	struct mem_cgroup *memcg;
160 161 162 163 164 165 166 167
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these stored in a list by the cgroup.
	 */
	struct list_head list;
168 169 170 171 172
	/*
	 * register_event() callback will be used to add new userspace
	 * waiter for changes related to this event.  Use eventfd_signal()
	 * on eventfd to send notification to userspace.
	 */
173
	int (*register_event)(struct mem_cgroup *memcg,
T
Tejun Heo 已提交
174
			      struct eventfd_ctx *eventfd, const char *args);
175 176 177 178 179
	/*
	 * unregister_event() callback will be called when userspace closes
	 * the eventfd or on cgroup removing.  This callback must be set,
	 * if you want provide notification functionality.
	 */
180
	void (*unregister_event)(struct mem_cgroup *memcg,
181
				 struct eventfd_ctx *eventfd);
182 183 184 185 186 187 188 189 190 191
	/*
	 * All fields below needed to unregister event when
	 * userspace closes eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_t wait;
	struct work_struct remove;
};

192 193
static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
194

195 196
/* Stuffs for move charges at task migration. */
/*
197
 * Types of charges to be moved.
198
 */
199 200 201
#define MOVE_ANON	0x1U
#define MOVE_FILE	0x2U
#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)
202

203 204
/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
205
	spinlock_t	  lock; /* for from, to */
206
	struct mm_struct  *mm;
207 208
	struct mem_cgroup *from;
	struct mem_cgroup *to;
209
	unsigned long flags;
210
	unsigned long precharge;
211
	unsigned long moved_charge;
212
	unsigned long moved_swap;
213 214 215
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
216
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
217 218
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};
219

220 221 222 223
/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
224
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
225
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2
226

227 228
enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
229
	MEM_CGROUP_CHARGE_TYPE_ANON,
K
KAMEZAWA Hiroyuki 已提交
230
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
K
KAMEZAWA Hiroyuki 已提交
231
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
232 233 234
	NR_CHARGE_TYPE,
};

235
/* for encoding cft->private value on file */
G
Glauber Costa 已提交
236 237 238 239
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
240
	_KMEM,
V
Vladimir Davydov 已提交
241
	_TCP,
G
Glauber Costa 已提交
242 243
};

244 245
#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
246
#define MEMFILE_ATTR(val)	((val) & 0xffff)
K
KAMEZAWA Hiroyuki 已提交
247 248
/* Used for OOM nofiier */
#define OOM_CONTROL		(0)
249

250 251 252 253 254 255 256 257 258 259 260 261 262
/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

263 264 265 266 267
static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

268
#ifndef CONFIG_SLOB
269
/*
270
 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
L
Li Zefan 已提交
271 272 273 274 275
 * The main reason for not using cgroup id for this:
 *  this works better in sparse environments, where we have a lot of memcgs,
 *  but only a few kmem-limited. Or also, if we have, for instance, 200
 *  memcgs, and none but the 200th is kmem-limited, we'd have to have a
 *  200 entry array for that.
276
 *
277 278
 * The current size of the caches array is stored in memcg_nr_cache_ids. It
 * will double each time we have to increase it.
279
 */
280 281
static DEFINE_IDA(memcg_cache_ida);
int memcg_nr_cache_ids;
282

283 284 285 286 287 288 289 290 291 292 293 294 295
/* Protects memcg_nr_cache_ids */
static DECLARE_RWSEM(memcg_cache_ids_sem);

void memcg_get_cache_ids(void)
{
	down_read(&memcg_cache_ids_sem);
}

void memcg_put_cache_ids(void)
{
	up_read(&memcg_cache_ids_sem);
}

296 297 298 299 300 301
/*
 * MIN_SIZE is different than 1, because we would like to avoid going through
 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 * cgroups is a reasonable guess. In the future, it could be a parameter or
 * tunable, but that is strictly not necessary.
 *
L
Li Zefan 已提交
302
 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
303 304
 * this constant directly from cgroup, but it is understandable that this is
 * better kept as an internal representation in cgroup.c. In any case, the
L
Li Zefan 已提交
305
 * cgrp_id space is not getting any smaller, and we don't have to necessarily
306 307 308
 * increase ours as well if it increases.
 */
#define MEMCG_CACHES_MIN_SIZE 4
L
Li Zefan 已提交
309
#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
310

311 312 313 314 315 316
/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
 * conditional to this static branch, we'll have to allow modules that does
 * kmem_cache_alloc and the such to see this symbol as well
 */
317
DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
318
EXPORT_SYMBOL(memcg_kmem_enabled_key);
319

320
#endif /* !CONFIG_SLOB */
321

322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338
/**
 * mem_cgroup_css_from_page - css of the memcg associated with a page
 * @page: page of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @page is returned.  The returned css remains associated with @page
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
{
	struct mem_cgroup *memcg;

	memcg = page->mem_cgroup;

339
	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
340 341 342 343 344
		memcg = root_mem_cgroup;

	return &memcg->css;
}

345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372
/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it only should be used by callers that
 * do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
	memcg = READ_ONCE(page->mem_cgroup);
	while (memcg && !(memcg->css.flags & CSS_ONLINE))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}

373 374
static struct mem_cgroup_per_node *
mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
375
{
376
	int nid = page_to_nid(page);
377

378
	return memcg->nodeinfo[nid];
379 380
}

381 382
static struct mem_cgroup_tree_per_node *
soft_limit_tree_node(int nid)
383
{
384
	return soft_limit_tree.rb_tree_per_node[nid];
385 386
}

387
static struct mem_cgroup_tree_per_node *
388 389 390 391
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);

392
	return soft_limit_tree.rb_tree_per_node[nid];
393 394
}

395 396
static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz,
397
					 unsigned long new_usage_in_excess)
398 399 400
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
401
	struct mem_cgroup_per_node *mz_node;
402 403 404 405 406 407 408 409 410

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
411
		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
412 413 414 415 416 417 418 419 420 421 422 423 424 425 426
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess)
			p = &(*p)->rb_left;
		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}
	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

427 428
static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz)
429 430 431 432 433 434 435
{
	if (!mz->on_tree)
		return;
	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

436 437
static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
				       struct mem_cgroup_tree_per_node *mctz)
438
{
439 440 441
	unsigned long flags;

	spin_lock_irqsave(&mctz->lock, flags);
442
	__mem_cgroup_remove_exceeded(mz, mctz);
443
	spin_unlock_irqrestore(&mctz->lock, flags);
444 445
}

446 447 448
static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
	unsigned long nr_pages = page_counter_read(&memcg->memory);
449
	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
450 451 452 453 454 455 456
	unsigned long excess = 0;

	if (nr_pages > soft_limit)
		excess = nr_pages - soft_limit;

	return excess;
}
457 458 459

static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
460
	unsigned long excess;
461 462
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup_tree_per_node *mctz;
463

464
	mctz = soft_limit_tree_from_page(page);
465 466 467 468 469
	/*
	 * Necessary to update all ancestors when hierarchy is used.
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
470
		mz = mem_cgroup_page_nodeinfo(memcg, page);
471
		excess = soft_limit_excess(memcg);
472 473 474 475 476
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
477 478 479
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
480 481
			/* if on-tree, remove it */
			if (mz->on_tree)
482
				__mem_cgroup_remove_exceeded(mz, mctz);
483 484 485 486
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
487
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
488
			spin_unlock_irqrestore(&mctz->lock, flags);
489 490 491 492 493 494
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
495 496 497
	struct mem_cgroup_tree_per_node *mctz;
	struct mem_cgroup_per_node *mz;
	int nid;
498

499
	for_each_node(nid) {
500 501 502
		mz = mem_cgroup_nodeinfo(memcg, nid);
		mctz = soft_limit_tree_node(nid);
		mem_cgroup_remove_exceeded(mz, mctz);
503 504 505
	}
}

506 507
static struct mem_cgroup_per_node *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
508 509
{
	struct rb_node *rightmost = NULL;
510
	struct mem_cgroup_per_node *mz;
511 512 513 514 515 516 517

retry:
	mz = NULL;
	rightmost = rb_last(&mctz->rb_root);
	if (!rightmost)
		goto done;		/* Nothing to reclaim from */

518
	mz = rb_entry(rightmost, struct mem_cgroup_per_node, tree_node);
519 520 521 522 523
	/*
	 * Remove the node now but someone else can add it back,
	 * we will to add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
524
	__mem_cgroup_remove_exceeded(mz, mctz);
525
	if (!soft_limit_excess(mz->memcg) ||
526
	    !css_tryget_online(&mz->memcg->css))
527 528 529 530 531
		goto retry;
done:
	return mz;
}

532 533
static struct mem_cgroup_per_node *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
534
{
535
	struct mem_cgroup_per_node *mz;
536

537
	spin_lock_irq(&mctz->lock);
538
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
539
	spin_unlock_irq(&mctz->lock);
540 541 542
	return mz;
}

543
/*
544 545
 * Return page count for single (non recursive) @memcg.
 *
546 547 548 549 550
 * Implementation Note: reading percpu statistics for memcg.
 *
 * Both of vmstat[] and percpu_counter has threshold and do periodic
 * synchronization to implement "quick" read. There are trade-off between
 * reading cost and precision of value. Then, we may have a chance to implement
551
 * a periodic synchronization of counter in memcg's counter.
552 553 554 555 556 557 558 559 560
 *
 * But this _read() function is used for user interface now. The user accounts
 * memory usage by memory cgroup and he _always_ requires exact value because
 * he accounts memory. Even if we provide quick-and-fuzzy read, we always
 * have to visit all online cpus and make sum. So, for now, unnecessary
 * synchronization is not implemented. (just implemented for cpu hotplug)
 *
 * If there are kernel internal actions which can make use of some not-exact
 * value, and reading all cpu value can be performance bottleneck in some
561
 * common workload, threshold and synchronization as vmstat[] should be
562 563
 * implemented.
 */
564 565
static unsigned long
mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx)
566
{
567
	long val = 0;
568 569
	int cpu;

570
	/* Per-cpu values can be negative, use a signed accumulator */
571
	for_each_possible_cpu(cpu)
572
		val += per_cpu(memcg->stat->count[idx], cpu);
573 574 575 576 577 578
	/*
	 * Summing races with updates, so val may be negative.  Avoid exposing
	 * transient negative values.
	 */
	if (val < 0)
		val = 0;
579 580 581
	return val;
}

582
static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
583 584 585 586 587
					    enum mem_cgroup_events_index idx)
{
	unsigned long val = 0;
	int cpu;

588
	for_each_possible_cpu(cpu)
589
		val += per_cpu(memcg->stat->events[idx], cpu);
590 591 592
	return val;
}

593
static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
594
					 struct page *page,
595
					 bool compound, int nr_pages)
596
{
597 598 599 600
	/*
	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
	 * counted as CACHE even if it's on ANON LRU.
	 */
601
	if (PageAnon(page))
602
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
603
				nr_pages);
604
	else
605
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
606
				nr_pages);
607

608 609
	if (compound) {
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
610 611
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
				nr_pages);
612
	}
613

614 615
	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
616
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
617
	else {
618
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
619 620
		nr_pages = -nr_pages; /* for event */
	}
621

622
	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
623 624
}

625 626
unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
					   int nid, unsigned int lru_mask)
627
{
628
	struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg);
629
	unsigned long nr = 0;
630
	enum lru_list lru;
631

632
	VM_BUG_ON((unsigned)nid >= nr_node_ids);
633

634 635 636
	for_each_lru(lru) {
		if (!(BIT(lru) & lru_mask))
			continue;
637
		nr += mem_cgroup_get_lru_size(lruvec, lru);
638 639
	}
	return nr;
640
}
641

642
static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
643
			unsigned int lru_mask)
644
{
645
	unsigned long nr = 0;
646
	int nid;
647

648
	for_each_node_state(nid, N_MEMORY)
649 650
		nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
	return nr;
651 652
}

653 654
static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
655 656 657
{
	unsigned long val, next;

658
	val = __this_cpu_read(memcg->stat->nr_page_events);
659
	next = __this_cpu_read(memcg->stat->targets[target]);
660
	/* from time_after() in jiffies.h */
661 662 663 664 665
	if ((long)next - (long)val < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
666 667 668
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
669 670 671 672 673 674 675 676
		case MEM_CGROUP_TARGET_NUMAINFO:
			next = val + NUMAINFO_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->stat->targets[target], next);
		return true;
677
	}
678
	return false;
679 680 681 682 683 684
}

/*
 * Check events in order.
 *
 */
685
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
686 687
{
	/* threshold event is triggered in finer grain than soft limit */
688 689
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
690
		bool do_softlimit;
691
		bool do_numainfo __maybe_unused;
692

693 694
		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
695 696 697 698
#if MAX_NUMNODES > 1
		do_numainfo = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_NUMAINFO);
#endif
699
		mem_cgroup_threshold(memcg);
700 701
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
702
#if MAX_NUMNODES > 1
703
		if (unlikely(do_numainfo))
704
			atomic_inc(&memcg->numainfo_events);
705
#endif
706
	}
707 708
}

709
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
710
{
711 712 713 714 715 716 717 718
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

719
	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
720
}
M
Michal Hocko 已提交
721
EXPORT_SYMBOL(mem_cgroup_from_task);
722

723
static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
724
{
725
	struct mem_cgroup *memcg = NULL;
726

727 728
	rcu_read_lock();
	do {
729 730 731 732 733 734
		/*
		 * Page cache insertions can happen withou an
		 * actual mm context, e.g. during disk probing
		 * on boot, loopback IO, acct() writes etc.
		 */
		if (unlikely(!mm))
735
			memcg = root_mem_cgroup;
736 737 738 739 740
		else {
			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
			if (unlikely(!memcg))
				memcg = root_mem_cgroup;
		}
741
	} while (!css_tryget_online(&memcg->css));
742
	rcu_read_unlock();
743
	return memcg;
744 745
}

746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762
/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a zone and a priority level in @reclaim to
 * divide up the memcgs in the hierarchy among all concurrent
 * reclaimers operating on the same zone and priority.
 */
763
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
764
				   struct mem_cgroup *prev,
765
				   struct mem_cgroup_reclaim_cookie *reclaim)
K
KAMEZAWA Hiroyuki 已提交
766
{
M
Michal Hocko 已提交
767
	struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
768
	struct cgroup_subsys_state *css = NULL;
769
	struct mem_cgroup *memcg = NULL;
770
	struct mem_cgroup *pos = NULL;
771

772 773
	if (mem_cgroup_disabled())
		return NULL;
774

775 776
	if (!root)
		root = root_mem_cgroup;
K
KAMEZAWA Hiroyuki 已提交
777

778
	if (prev && !reclaim)
779
		pos = prev;
K
KAMEZAWA Hiroyuki 已提交
780

781 782
	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
783
			goto out;
784
		return root;
785
	}
K
KAMEZAWA Hiroyuki 已提交
786

787
	rcu_read_lock();
M
Michal Hocko 已提交
788

789
	if (reclaim) {
790
		struct mem_cgroup_per_node *mz;
791

792
		mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
793 794 795 796 797
		iter = &mz->iter[reclaim->priority];

		if (prev && reclaim->generation != iter->generation)
			goto out_unlock;

798
		while (1) {
799
			pos = READ_ONCE(iter->position);
800 801
			if (!pos || css_tryget(&pos->css))
				break;
802
			/*
803 804 805 806 807 808
			 * css reference reached zero, so iter->position will
			 * be cleared by ->css_released. However, we should not
			 * rely on this happening soon, because ->css_released
			 * is called from a work queue, and by busy-waiting we
			 * might block it. So we clear iter->position right
			 * away.
809
			 */
810 811
			(void)cmpxchg(&iter->position, pos, NULL);
		}
812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
829
		}
K
KAMEZAWA Hiroyuki 已提交
830

831 832 833 834 835 836
		/*
		 * Verify the css and acquire a reference.  The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		memcg = mem_cgroup_from_css(css);
K
KAMEZAWA Hiroyuki 已提交
837

838 839
		if (css == &root->css)
			break;
K
KAMEZAWA Hiroyuki 已提交
840

841 842
		if (css_tryget(css))
			break;
843

844
		memcg = NULL;
845
	}
846 847 848

	if (reclaim) {
		/*
849 850 851
		 * The position could have already been updated by a competing
		 * thread, so check that the value hasn't changed since we read
		 * it to avoid reclaiming from the same cgroup twice.
852
		 */
853 854
		(void)cmpxchg(&iter->position, pos, memcg);

855 856 857 858 859 860 861
		if (pos)
			css_put(&pos->css);

		if (!memcg)
			iter->generation++;
		else if (!prev)
			reclaim->generation = iter->generation;
862
	}
863

864 865
out_unlock:
	rcu_read_unlock();
866
out:
867 868 869
	if (prev && prev != root)
		css_put(&prev->css);

870
	return memcg;
K
KAMEZAWA Hiroyuki 已提交
871
}
K
KAMEZAWA Hiroyuki 已提交
872

873 874 875 876 877 878 879
/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
880 881 882 883 884 885
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}
K
KAMEZAWA Hiroyuki 已提交
886

887 888 889 890
static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup *memcg = dead_memcg;
	struct mem_cgroup_reclaim_iter *iter;
891 892
	struct mem_cgroup_per_node *mz;
	int nid;
893 894 895 896
	int i;

	while ((memcg = parent_mem_cgroup(memcg))) {
		for_each_node(nid) {
897 898 899 900 901
			mz = mem_cgroup_nodeinfo(memcg, nid);
			for (i = 0; i <= DEF_PRIORITY; i++) {
				iter = &mz->iter[i];
				cmpxchg(&iter->position,
					dead_memcg, NULL);
902 903 904 905 906
			}
		}
	}
}

907 908 909 910 911 912
/*
 * Iteration constructs for visiting all cgroups (under a tree).  If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
913
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
914
	     iter != NULL;				\
915
	     iter = mem_cgroup_iter(root, iter, NULL))
916

917
#define for_each_mem_cgroup(iter)			\
918
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
919
	     iter != NULL;				\
920
	     iter = mem_cgroup_iter(NULL, iter, NULL))
K
KAMEZAWA Hiroyuki 已提交
921

922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958
/**
 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
 * @memcg: hierarchy root
 * @fn: function to call for each task
 * @arg: argument passed to @fn
 *
 * This function iterates over tasks attached to @memcg or to any of its
 * descendants and calls @fn for each task. If @fn returns a non-zero
 * value, the function breaks the iteration loop and returns the value.
 * Otherwise, it will iterate over all tasks and return 0.
 *
 * This function must not be called for the root memory cgroup.
 */
int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			  int (*fn)(struct task_struct *, void *), void *arg)
{
	struct mem_cgroup *iter;
	int ret = 0;

	BUG_ON(memcg == root_mem_cgroup);

	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, &it);
		while (!ret && (task = css_task_iter_next(&it)))
			ret = fn(task, arg);
		css_task_iter_end(&it);
		if (ret) {
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
	}
	return ret;
}

959
/**
960
 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
961
 * @page: the page
962
 * @zone: zone of the page
963 964 965 966
 *
 * This function is only safe when following the LRU page isolation
 * and putback protocol: the LRU lock must be held, and the page must
 * either be PageLRU() or the caller must have isolated/allocated it.
967
 */
M
Mel Gorman 已提交
968
struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat)
K
KAMEZAWA Hiroyuki 已提交
969
{
970
	struct mem_cgroup_per_node *mz;
971
	struct mem_cgroup *memcg;
972
	struct lruvec *lruvec;
973

974
	if (mem_cgroup_disabled()) {
M
Mel Gorman 已提交
975
		lruvec = &pgdat->lruvec;
976 977
		goto out;
	}
978

979
	memcg = page->mem_cgroup;
980
	/*
981
	 * Swapcache readahead pages are added to the LRU - and
982
	 * possibly migrated - before they are charged.
983
	 */
984 985
	if (!memcg)
		memcg = root_mem_cgroup;
986

987
	mz = mem_cgroup_page_nodeinfo(memcg, page);
988 989 990 991 992 993 994
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->zone here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
M
Mel Gorman 已提交
995 996
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
997
	return lruvec;
K
KAMEZAWA Hiroyuki 已提交
998
}
999

1000
/**
1001 1002 1003
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
1004
 * @zid: zone id of the accounted pages
1005
 * @nr_pages: positive when adding or negative when removing
1006
 *
1007 1008 1009
 * This function must be called under lru_lock, just before a page is added
 * to or just after a page is removed from an lru list (that ordering being
 * so as to allow it to check that lru_size 0 is consistent with list_empty).
1010
 */
1011
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1012
				int zid, int nr_pages)
1013
{
1014
	struct mem_cgroup_per_node *mz;
1015
	unsigned long *lru_size;
1016
	long size;
1017 1018 1019 1020

	if (mem_cgroup_disabled())
		return;

1021
	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1022
	lru_size = &mz->lru_zone_size[zid][lru];
1023 1024 1025 1026 1027

	if (nr_pages < 0)
		*lru_size += nr_pages;

	size = *lru_size;
1028 1029 1030
	if (WARN_ONCE(size < 0,
		"%s(%p, %d, %d): lru_size %ld\n",
		__func__, lruvec, lru, nr_pages, size)) {
1031 1032 1033 1034 1035 1036
		VM_BUG_ON(1);
		*lru_size = 0;
	}

	if (nr_pages > 0)
		*lru_size += nr_pages;
K
KAMEZAWA Hiroyuki 已提交
1037
}
1038

1039
bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
1040
{
1041
	struct mem_cgroup *task_memcg;
1042
	struct task_struct *p;
1043
	bool ret;
1044

1045
	p = find_lock_task_mm(task);
1046
	if (p) {
1047
		task_memcg = get_mem_cgroup_from_mm(p->mm);
1048 1049 1050 1051 1052 1053 1054
		task_unlock(p);
	} else {
		/*
		 * All threads may have already detached their mm's, but the oom
		 * killer still needs to detect if they have already been oom
		 * killed to prevent needlessly killing additional tasks.
		 */
1055
		rcu_read_lock();
1056 1057
		task_memcg = mem_cgroup_from_task(task);
		css_get(&task_memcg->css);
1058
		rcu_read_unlock();
1059
	}
1060 1061
	ret = mem_cgroup_is_descendant(task_memcg, memcg);
	css_put(&task_memcg->css);
1062 1063 1064
	return ret;
}

1065
/**
1066
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
W
Wanpeng Li 已提交
1067
 * @memcg: the memory cgroup
1068
 *
1069
 * Returns the maximum amount of memory @mem can be charged with, in
1070
 * pages.
1071
 */
1072
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1073
{
1074 1075 1076
	unsigned long margin = 0;
	unsigned long count;
	unsigned long limit;
1077

1078
	count = page_counter_read(&memcg->memory);
1079
	limit = READ_ONCE(memcg->memory.limit);
1080 1081 1082
	if (count < limit)
		margin = limit - count;

1083
	if (do_memsw_account()) {
1084
		count = page_counter_read(&memcg->memsw);
1085
		limit = READ_ONCE(memcg->memsw.limit);
1086 1087
		if (count <= limit)
			margin = min(margin, limit - count);
1088 1089
		else
			margin = 0;
1090 1091 1092
	}

	return margin;
1093 1094
}

1095
/*
Q
Qiang Huang 已提交
1096
 * A routine for checking "mem" is under move_account() or not.
1097
 *
Q
Qiang Huang 已提交
1098 1099 1100
 * Checking a cgroup is mc.from or mc.to or under hierarchy of
 * moving cgroups. This is for waiting at high-memory pressure
 * caused by "move".
1101
 */
1102
static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1103
{
1104 1105
	struct mem_cgroup *from;
	struct mem_cgroup *to;
1106
	bool ret = false;
1107 1108 1109 1110 1111 1112 1113 1114 1115
	/*
	 * Unlike task_move routines, we access mc.to, mc.from not under
	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;
1116

1117 1118
	ret = mem_cgroup_is_descendant(from, memcg) ||
		mem_cgroup_is_descendant(to, memcg);
1119 1120
unlock:
	spin_unlock(&mc.lock);
1121 1122 1123
	return ret;
}

1124
static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1125 1126
{
	if (mc.moving_task && current != mc.moving_task) {
1127
		if (mem_cgroup_under_move(memcg)) {
1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}

1140
#define K(x) ((x) << (PAGE_SHIFT-10))
1141
/**
1142
 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
1143 1144 1145 1146 1147 1148 1149 1150
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
1151 1152
	struct mem_cgroup *iter;
	unsigned int i;
1153 1154 1155

	rcu_read_lock();

1156 1157 1158 1159 1160 1161 1162 1163
	if (p) {
		pr_info("Task in ");
		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
		pr_cont(" killed as a result of limit of ");
	} else {
		pr_info("Memory limit reached of cgroup ");
	}

T
Tejun Heo 已提交
1164
	pr_cont_cgroup_path(memcg->css.cgroup);
1165
	pr_cont("\n");
1166 1167 1168

	rcu_read_unlock();

1169 1170 1171 1172 1173 1174 1175 1176 1177
	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memory)),
		K((u64)memcg->memory.limit), memcg->memory.failcnt);
	pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memsw)),
		K((u64)memcg->memsw.limit), memcg->memsw.failcnt);
	pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->kmem)),
		K((u64)memcg->kmem.limit), memcg->kmem.failcnt);
1178 1179

	for_each_mem_cgroup_tree(iter, memcg) {
T
Tejun Heo 已提交
1180 1181
		pr_info("Memory cgroup stats for ");
		pr_cont_cgroup_path(iter->css.cgroup);
1182 1183 1184
		pr_cont(":");

		for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
1185
			if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
1186
				continue;
1187
			pr_cont(" %s:%luKB", mem_cgroup_stat_names[i],
1188 1189 1190 1191 1192 1193 1194 1195 1196
				K(mem_cgroup_read_stat(iter, i)));
		}

		for (i = 0; i < NR_LRU_LISTS; i++)
			pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
				K(mem_cgroup_nr_lru_pages(iter, BIT(i))));

		pr_cont("\n");
	}
1197 1198
}

1199 1200 1201 1202
/*
 * This function returns the number of memcg under hierarchy tree. Returns
 * 1(self count) if no children.
 */
1203
static int mem_cgroup_count_children(struct mem_cgroup *memcg)
1204 1205
{
	int num = 0;
K
KAMEZAWA Hiroyuki 已提交
1206 1207
	struct mem_cgroup *iter;

1208
	for_each_mem_cgroup_tree(iter, memcg)
K
KAMEZAWA Hiroyuki 已提交
1209
		num++;
1210 1211 1212
	return num;
}

D
David Rientjes 已提交
1213 1214 1215
/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
1216
unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
D
David Rientjes 已提交
1217
{
1218
	unsigned long limit;
1219

1220
	limit = memcg->memory.limit;
1221
	if (mem_cgroup_swappiness(memcg)) {
1222
		unsigned long memsw_limit;
1223
		unsigned long swap_limit;
1224

1225
		memsw_limit = memcg->memsw.limit;
1226 1227 1228
		swap_limit = memcg->swap.limit;
		swap_limit = min(swap_limit, (unsigned long)total_swap_pages);
		limit = min(limit + swap_limit, memsw_limit);
1229 1230
	}
	return limit;
D
David Rientjes 已提交
1231 1232
}

1233
static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1234
				     int order)
1235
{
1236 1237 1238
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
1239
		.memcg = memcg,
1240 1241 1242
		.gfp_mask = gfp_mask,
		.order = order,
	};
1243
	bool ret;
1244

1245
	mutex_lock(&oom_lock);
1246
	ret = out_of_memory(&oc);
1247
	mutex_unlock(&oom_lock);
1248
	return ret;
1249 1250
}

1251 1252
#if MAX_NUMNODES > 1

1253 1254
/**
 * test_mem_cgroup_node_reclaimable
W
Wanpeng Li 已提交
1255
 * @memcg: the target memcg
1256 1257 1258 1259 1260 1261 1262
 * @nid: the node ID to be checked.
 * @noswap : specify true here if the user wants flle only information.
 *
 * This function returns whether the specified memcg contains any
 * reclaimable pages on a node. Returns true if there are any reclaimable
 * pages in the node.
 */
1263
static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
1264 1265
		int nid, bool noswap)
{
1266
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
1267 1268 1269
		return true;
	if (noswap || !total_swap_pages)
		return false;
1270
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
1271 1272 1273 1274
		return true;
	return false;

}
1275 1276 1277 1278 1279 1280 1281

/*
 * Always updating the nodemask is not very good - even if we have an empty
 * list or the wrong list here, we can start from some node and traverse all
 * nodes based on the zonelist. So update the list loosely once per 10 secs.
 *
 */
1282
static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
1283 1284
{
	int nid;
1285 1286 1287 1288
	/*
	 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
	 * pagein/pageout changes since the last update.
	 */
1289
	if (!atomic_read(&memcg->numainfo_events))
1290
		return;
1291
	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
1292 1293 1294
		return;

	/* make a nodemask where this memcg uses memory from */
1295
	memcg->scan_nodes = node_states[N_MEMORY];
1296

1297
	for_each_node_mask(nid, node_states[N_MEMORY]) {
1298

1299 1300
		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
			node_clear(nid, memcg->scan_nodes);
1301
	}
1302

1303 1304
	atomic_set(&memcg->numainfo_events, 0);
	atomic_set(&memcg->numainfo_updating, 0);
1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318
}

/*
 * Selecting a node where we start reclaim from. Because what we need is just
 * reducing usage counter, start from anywhere is O,K. Considering
 * memory reclaim from current node, there are pros. and cons.
 *
 * Freeing memory from current node means freeing memory from a node which
 * we'll use or we've used. So, it may make LRU bad. And if several threads
 * hit limits, it will see a contention on a node. But freeing from remote
 * node means more costs for memory reclaim because of memory latency.
 *
 * Now, we use round-robin. Better algorithm is welcomed.
 */
1319
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1320 1321 1322
{
	int node;

1323 1324
	mem_cgroup_may_update_nodemask(memcg);
	node = memcg->last_scanned_node;
1325

1326
	node = next_node_in(node, memcg->scan_nodes);
1327
	/*
1328 1329 1330
	 * mem_cgroup_may_update_nodemask might have seen no reclaimmable pages
	 * last time it really checked all the LRUs due to rate limiting.
	 * Fallback to the current node in that case for simplicity.
1331 1332 1333 1334
	 */
	if (unlikely(node == MAX_NUMNODES))
		node = numa_node_id();

1335
	memcg->last_scanned_node = node;
1336 1337 1338
	return node;
}
#else
1339
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1340 1341 1342 1343 1344
{
	return 0;
}
#endif

1345
static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1346
				   pg_data_t *pgdat,
1347 1348 1349 1350 1351 1352 1353 1354 1355
				   gfp_t gfp_mask,
				   unsigned long *total_scanned)
{
	struct mem_cgroup *victim = NULL;
	int total = 0;
	int loop = 0;
	unsigned long excess;
	unsigned long nr_scanned;
	struct mem_cgroup_reclaim_cookie reclaim = {
1356
		.pgdat = pgdat,
1357 1358 1359
		.priority = 0,
	};

1360
	excess = soft_limit_excess(root_memcg);
1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385

	while (1) {
		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
		if (!victim) {
			loop++;
			if (loop >= 2) {
				/*
				 * If we have not been able to reclaim
				 * anything, it might because there are
				 * no reclaimable pages under this hierarchy
				 */
				if (!total)
					break;
				/*
				 * We want to do more targeted reclaim.
				 * excess >> 2 is not to excessive so as to
				 * reclaim too much, nor too less that we keep
				 * coming back to reclaim from this cgroup
				 */
				if (total >= (excess >> 2) ||
					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
					break;
			}
			continue;
		}
1386
		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1387
					pgdat, &nr_scanned);
1388
		*total_scanned += nr_scanned;
1389
		if (!soft_limit_excess(root_memcg))
1390
			break;
1391
	}
1392 1393
	mem_cgroup_iter_break(root_memcg, victim);
	return total;
1394 1395
}

1396 1397 1398 1399 1400 1401
#ifdef CONFIG_LOCKDEP
static struct lockdep_map memcg_oom_lock_dep_map = {
	.name = "memcg_oom_lock",
};
#endif

1402 1403
static DEFINE_SPINLOCK(memcg_oom_lock);

K
KAMEZAWA Hiroyuki 已提交
1404 1405 1406 1407
/*
 * Check OOM-Killer is already running under our hierarchy.
 * If someone is running, return false.
 */
1408
static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
K
KAMEZAWA Hiroyuki 已提交
1409
{
1410
	struct mem_cgroup *iter, *failed = NULL;
1411

1412 1413
	spin_lock(&memcg_oom_lock);

1414
	for_each_mem_cgroup_tree(iter, memcg) {
1415
		if (iter->oom_lock) {
1416 1417 1418 1419 1420
			/*
			 * this subtree of our hierarchy is already locked
			 * so we cannot give a lock.
			 */
			failed = iter;
1421 1422
			mem_cgroup_iter_break(memcg, iter);
			break;
1423 1424
		} else
			iter->oom_lock = true;
K
KAMEZAWA Hiroyuki 已提交
1425
	}
K
KAMEZAWA Hiroyuki 已提交
1426

1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437
	if (failed) {
		/*
		 * OK, we failed to lock the whole subtree so we have
		 * to clean up what we set up to the failing subtree
		 */
		for_each_mem_cgroup_tree(iter, memcg) {
			if (iter == failed) {
				mem_cgroup_iter_break(memcg, iter);
				break;
			}
			iter->oom_lock = false;
1438
		}
1439 1440
	} else
		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1441 1442 1443 1444

	spin_unlock(&memcg_oom_lock);

	return !failed;
1445
}
1446

1447
static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1448
{
K
KAMEZAWA Hiroyuki 已提交
1449 1450
	struct mem_cgroup *iter;

1451
	spin_lock(&memcg_oom_lock);
1452
	mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
1453
	for_each_mem_cgroup_tree(iter, memcg)
1454
		iter->oom_lock = false;
1455
	spin_unlock(&memcg_oom_lock);
1456 1457
}

1458
static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1459 1460 1461
{
	struct mem_cgroup *iter;

1462
	spin_lock(&memcg_oom_lock);
1463
	for_each_mem_cgroup_tree(iter, memcg)
1464 1465
		iter->under_oom++;
	spin_unlock(&memcg_oom_lock);
1466 1467
}

1468
static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1469 1470 1471
{
	struct mem_cgroup *iter;

K
KAMEZAWA Hiroyuki 已提交
1472 1473
	/*
	 * When a new child is created while the hierarchy is under oom,
1474
	 * mem_cgroup_oom_lock() may not be called. Watch for underflow.
K
KAMEZAWA Hiroyuki 已提交
1475
	 */
1476
	spin_lock(&memcg_oom_lock);
1477
	for_each_mem_cgroup_tree(iter, memcg)
1478 1479 1480
		if (iter->under_oom > 0)
			iter->under_oom--;
	spin_unlock(&memcg_oom_lock);
1481 1482
}

K
KAMEZAWA Hiroyuki 已提交
1483 1484
static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);

K
KAMEZAWA Hiroyuki 已提交
1485
struct oom_wait_info {
1486
	struct mem_cgroup *memcg;
K
KAMEZAWA Hiroyuki 已提交
1487 1488 1489 1490 1491 1492
	wait_queue_t	wait;
};

static int memcg_oom_wake_function(wait_queue_t *wait,
	unsigned mode, int sync, void *arg)
{
1493 1494
	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
	struct mem_cgroup *oom_wait_memcg;
K
KAMEZAWA Hiroyuki 已提交
1495 1496 1497
	struct oom_wait_info *oom_wait_info;

	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1498
	oom_wait_memcg = oom_wait_info->memcg;
K
KAMEZAWA Hiroyuki 已提交
1499

1500 1501
	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
K
KAMEZAWA Hiroyuki 已提交
1502 1503 1504 1505
		return 0;
	return autoremove_wake_function(wait, mode, sync, arg);
}

1506
static void memcg_oom_recover(struct mem_cgroup *memcg)
1507
{
1508 1509 1510 1511 1512 1513 1514 1515 1516
	/*
	 * For the following lockless ->under_oom test, the only required
	 * guarantee is that it must see the state asserted by an OOM when
	 * this function is called as a result of userland actions
	 * triggered by the notification of the OOM.  This is trivially
	 * achieved by invoking mem_cgroup_mark_under_oom() before
	 * triggering notification.
	 */
	if (memcg && memcg->under_oom)
1517
		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1518 1519
}

1520
static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1521
{
1522
	if (!current->memcg_may_oom)
1523
		return;
K
KAMEZAWA Hiroyuki 已提交
1524
	/*
1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536
	 * We are in the middle of the charge context here, so we
	 * don't want to block when potentially sitting on a callstack
	 * that holds all kinds of filesystem and mm locks.
	 *
	 * Also, the caller may handle a failed allocation gracefully
	 * (like optional page cache readahead) and so an OOM killer
	 * invocation might not even be necessary.
	 *
	 * That's why we don't do anything here except remember the
	 * OOM context and then deal with it at the end of the page
	 * fault when the stack is unwound, the locks are released,
	 * and when we know whether the fault was overall successful.
K
KAMEZAWA Hiroyuki 已提交
1537
	 */
1538
	css_get(&memcg->css);
T
Tejun Heo 已提交
1539 1540 1541
	current->memcg_in_oom = memcg;
	current->memcg_oom_gfp_mask = mask;
	current->memcg_oom_order = order;
1542 1543 1544 1545
}

/**
 * mem_cgroup_oom_synchronize - complete memcg OOM handling
1546
 * @handle: actually kill/wait or just clean up the OOM state
1547
 *
1548 1549
 * This has to be called at the end of a page fault if the memcg OOM
 * handler was enabled.
1550
 *
1551
 * Memcg supports userspace OOM handling where failed allocations must
1552 1553 1554 1555
 * sleep on a waitqueue until the userspace task resolves the
 * situation.  Sleeping directly in the charge context with all kinds
 * of locks held is not a good idea, instead we remember an OOM state
 * in the task and mem_cgroup_oom_synchronize() has to be called at
1556
 * the end of the page fault to complete the OOM handling.
1557 1558
 *
 * Returns %true if an ongoing memcg OOM situation was detected and
1559
 * completed, %false otherwise.
1560
 */
1561
bool mem_cgroup_oom_synchronize(bool handle)
1562
{
T
Tejun Heo 已提交
1563
	struct mem_cgroup *memcg = current->memcg_in_oom;
1564
	struct oom_wait_info owait;
1565
	bool locked;
1566 1567 1568

	/* OOM is global, do not handle */
	if (!memcg)
1569
		return false;
1570

1571
	if (!handle)
1572
		goto cleanup;
1573 1574 1575 1576 1577 1578

	owait.memcg = memcg;
	owait.wait.flags = 0;
	owait.wait.func = memcg_oom_wake_function;
	owait.wait.private = current;
	INIT_LIST_HEAD(&owait.wait.task_list);
K
KAMEZAWA Hiroyuki 已提交
1579

1580
	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1581 1582 1583 1584 1585 1586 1587 1588 1589 1590
	mem_cgroup_mark_under_oom(memcg);

	locked = mem_cgroup_oom_trylock(memcg);

	if (locked)
		mem_cgroup_oom_notify(memcg);

	if (locked && !memcg->oom_kill_disable) {
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
T
Tejun Heo 已提交
1591 1592
		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
					 current->memcg_oom_order);
1593
	} else {
1594
		schedule();
1595 1596 1597 1598 1599
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
	}

	if (locked) {
1600 1601 1602 1603 1604 1605 1606 1607
		mem_cgroup_oom_unlock(memcg);
		/*
		 * There is no guarantee that an OOM-lock contender
		 * sees the wakeups triggered by the OOM kill
		 * uncharges.  Wake any sleepers explicitely.
		 */
		memcg_oom_recover(memcg);
	}
1608
cleanup:
T
Tejun Heo 已提交
1609
	current->memcg_in_oom = NULL;
1610
	css_put(&memcg->css);
K
KAMEZAWA Hiroyuki 已提交
1611
	return true;
1612 1613
}

1614
/**
1615 1616
 * lock_page_memcg - lock a page->mem_cgroup binding
 * @page: the page
1617
 *
1618 1619
 * This function protects unlocked LRU pages from being moved to
 * another cgroup and stabilizes their page->mem_cgroup binding.
1620
 */
J
Johannes Weiner 已提交
1621
void lock_page_memcg(struct page *page)
1622 1623
{
	struct mem_cgroup *memcg;
1624
	unsigned long flags;
1625

1626 1627 1628 1629 1630
	/*
	 * The RCU lock is held throughout the transaction.  The fast
	 * path can get away without acquiring the memcg->move_lock
	 * because page moving starts with an RCU grace period.
	 */
1631 1632 1633
	rcu_read_lock();

	if (mem_cgroup_disabled())
J
Johannes Weiner 已提交
1634
		return;
1635
again:
1636
	memcg = page->mem_cgroup;
1637
	if (unlikely(!memcg))
J
Johannes Weiner 已提交
1638
		return;
1639

Q
Qiang Huang 已提交
1640
	if (atomic_read(&memcg->moving_account) <= 0)
J
Johannes Weiner 已提交
1641
		return;
1642

1643
	spin_lock_irqsave(&memcg->move_lock, flags);
1644
	if (memcg != page->mem_cgroup) {
1645
		spin_unlock_irqrestore(&memcg->move_lock, flags);
1646 1647
		goto again;
	}
1648 1649 1650 1651

	/*
	 * When charge migration first begins, we can have locked and
	 * unlocked page stat updates happening concurrently.  Track
1652
	 * the task who has the lock for unlock_page_memcg().
1653 1654 1655
	 */
	memcg->move_lock_task = current;
	memcg->move_lock_flags = flags;
1656

J
Johannes Weiner 已提交
1657
	return;
1658
}
1659
EXPORT_SYMBOL(lock_page_memcg);
1660

1661
/**
1662
 * unlock_page_memcg - unlock a page->mem_cgroup binding
J
Johannes Weiner 已提交
1663
 * @page: the page
1664
 */
J
Johannes Weiner 已提交
1665
void unlock_page_memcg(struct page *page)
1666
{
J
Johannes Weiner 已提交
1667 1668
	struct mem_cgroup *memcg = page->mem_cgroup;

1669 1670 1671 1672 1673 1674 1675 1676
	if (memcg && memcg->move_lock_task == current) {
		unsigned long flags = memcg->move_lock_flags;

		memcg->move_lock_task = NULL;
		memcg->move_lock_flags = 0;

		spin_unlock_irqrestore(&memcg->move_lock, flags);
	}
1677

1678
	rcu_read_unlock();
1679
}
1680
EXPORT_SYMBOL(unlock_page_memcg);
1681

1682 1683 1684 1685
/*
 * size of first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: maybe necessary to use big numbers in big irons.
 */
1686
#define CHARGE_BATCH	32U
1687 1688
struct memcg_stock_pcp {
	struct mem_cgroup *cached; /* this never be root cgroup */
1689
	unsigned int nr_pages;
1690
	struct work_struct work;
1691
	unsigned long flags;
1692
#define FLUSHING_CACHED_CHARGE	0
1693 1694
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
1695
static DEFINE_MUTEX(percpu_charge_mutex);
1696

1697 1698 1699 1700 1701 1702 1703 1704 1705 1706
/**
 * consume_stock: Try to consume stocked charge on this cpu.
 * @memcg: memcg to consume from.
 * @nr_pages: how many pages to charge.
 *
 * The charges will only happen if @memcg matches the current cpu's memcg
 * stock, and at least @nr_pages are available in that stock.  Failure to
 * service an allocation will refill the stock.
 *
 * returns true if successful, false otherwise.
1707
 */
1708
static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1709 1710
{
	struct memcg_stock_pcp *stock;
1711
	unsigned long flags;
1712
	bool ret = false;
1713

1714
	if (nr_pages > CHARGE_BATCH)
1715
		return ret;
1716

1717 1718 1719
	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
1720
	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
1721
		stock->nr_pages -= nr_pages;
1722 1723
		ret = true;
	}
1724 1725 1726

	local_irq_restore(flags);

1727 1728 1729 1730
	return ret;
}

/*
1731
 * Returns stocks cached in percpu and reset cached information.
1732 1733 1734 1735 1736
 */
static void drain_stock(struct memcg_stock_pcp *stock)
{
	struct mem_cgroup *old = stock->cached;

1737
	if (stock->nr_pages) {
1738
		page_counter_uncharge(&old->memory, stock->nr_pages);
1739
		if (do_memsw_account())
1740
			page_counter_uncharge(&old->memsw, stock->nr_pages);
1741
		css_put_many(&old->css, stock->nr_pages);
1742
		stock->nr_pages = 0;
1743 1744 1745 1746 1747 1748
	}
	stock->cached = NULL;
}

static void drain_local_stock(struct work_struct *dummy)
{
1749 1750 1751 1752 1753 1754
	struct memcg_stock_pcp *stock;
	unsigned long flags;

	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
1755
	drain_stock(stock);
1756
	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
1757 1758

	local_irq_restore(flags);
1759 1760 1761
}

/*
1762
 * Cache charges(val) to local per_cpu area.
1763
 * This will be consumed by consume_stock() function, later.
1764
 */
1765
static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1766
{
1767 1768 1769 1770
	struct memcg_stock_pcp *stock;
	unsigned long flags;

	local_irq_save(flags);
1771

1772
	stock = this_cpu_ptr(&memcg_stock);
1773
	if (stock->cached != memcg) { /* reset if necessary */
1774
		drain_stock(stock);
1775
		stock->cached = memcg;
1776
	}
1777
	stock->nr_pages += nr_pages;
1778 1779

	local_irq_restore(flags);
1780 1781 1782
}

/*
 * Drain all per-CPU charge caches for the given root_memcg and the
 * whole subtree of the hierarchy below it.
 */
static void drain_all_stock(struct mem_cgroup *root_memcg)
{
	int cpu, curcpu;

	/* If someone's already draining, avoid adding more workers. */
	if (!mutex_trylock(&percpu_charge_mutex))
		return;
	/* Notify other cpus that system-wide "drain" is running */
	get_online_cpus();
	curcpu = get_cpu();
	for_each_online_cpu(cpu) {
		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
		struct mem_cgroup *memcg;

		memcg = stock->cached;
		if (!memcg || !stock->nr_pages)
			continue;
		if (!mem_cgroup_is_descendant(memcg, root_memcg))
			continue;
		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
			if (cpu == curcpu)
				drain_local_stock(&stock->work);
			else
				schedule_work_on(cpu, &stock->work);
		}
	}
	put_cpu();
	put_online_cpus();
	mutex_unlock(&percpu_charge_mutex);
}

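/*
 * CPU hotplug callback: the cpu is going away, return its cached charge
 * to the page counters.
 */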
static int memcg_hotplug_cpu_dead(unsigned int cpu)
{
	struct memcg_stock_pcp *stock;

	stock = &per_cpu(memcg_stock, cpu);
	drain_stock(stock);
	return 0;
}

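/*
 * Walk up the hierarchy starting at @memcg and run targeted reclaim
 * against every level whose usage is above its high boundary.
 */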
static void reclaim_high(struct mem_cgroup *memcg,
			 unsigned int nr_pages,
			 gfp_t gfp_mask)
{
	do {
		if (page_counter_read(&memcg->memory) <= memcg->high)
			continue;
		mem_cgroup_events(memcg, MEMCG_HIGH, 1);
		try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
	} while ((memcg = parent_mem_cgroup(memcg)));
}

static void high_work_func(struct work_struct *work)
{
	struct mem_cgroup *memcg;

	memcg = container_of(work, struct mem_cgroup, high_work);
	reclaim_high(memcg, CHARGE_BATCH, GFP_KERNEL);
}

/*
 * Scheduled by try_charge() to be executed from the userland return path
 * and reclaims memory over the high limit.
 */
void mem_cgroup_handle_over_high(void)
{
	unsigned int nr_pages = current->memcg_nr_pages_over_high;
	struct mem_cgroup *memcg;

	if (likely(!nr_pages))
		return;

	memcg = get_mem_cgroup_from_mm(current->mm);
	reclaim_high(memcg, nr_pages, GFP_KERNEL);
	css_put(&memcg->css);
	current->memcg_nr_pages_over_high = 0;
}

static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
		      unsigned int nr_pages)
{
	unsigned int batch = max(CHARGE_BATCH, nr_pages);
	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct mem_cgroup *mem_over_limit;
	struct page_counter *counter;
	unsigned long nr_reclaimed;
	bool may_swap = true;
	bool drained = false;

	if (mem_cgroup_is_root(memcg))
		return 0;
retry:
	if (consume_stock(memcg, nr_pages))
		return 0;

	if (!do_memsw_account() ||
	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
		if (page_counter_try_charge(&memcg->memory, batch, &counter))
			goto done_restock;
		if (do_memsw_account())
			page_counter_uncharge(&memcg->memsw, batch);
		mem_over_limit = mem_cgroup_from_counter(counter, memory);
	} else {
		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
		may_swap = false;
	}

	if (batch > nr_pages) {
		batch = nr_pages;
		goto retry;
	}

	/*
	 * Unlike in global OOM situations, memcg is not in a physical
	 * memory shortage.  Allow dying and OOM-killed tasks to
	 * bypass the last charges so that they can exit quickly and
	 * free their memory.
	 */
	if (unlikely(test_thread_flag(TIF_MEMDIE) ||
		     fatal_signal_pending(current) ||
		     current->flags & PF_EXITING))
		goto force;

	/*
	 * Prevent unbounded recursion when reclaim operations need to
	 * allocate memory. This might exceed the limits temporarily,
	 * but we prefer facilitating memory reclaim and getting back
	 * under the limit over triggering OOM kills in these cases.
	 */
	if (unlikely(current->flags & PF_MEMALLOC))
		goto force;

	if (unlikely(task_in_memcg_oom(current)))
		goto nomem;

	if (!gfpflags_allow_blocking(gfp_mask))
		goto nomem;

	mem_cgroup_events(mem_over_limit, MEMCG_MAX, 1);

	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
						    gfp_mask, may_swap);

	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
		goto retry;

	if (!drained) {
		drain_all_stock(mem_over_limit);
		drained = true;
		goto retry;
	}

	if (gfp_mask & __GFP_NORETRY)
		goto nomem;
	/*
	 * Even though the limit is exceeded at this point, reclaim
	 * may have been able to free some pages.  Retry the charge
	 * before killing the task.
	 *
	 * Only for regular pages, though: huge pages are rather
	 * unlikely to succeed so close to the limit, and we fall back
	 * to regular pages anyway in case of failure.
	 */
	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
		goto retry;
	/*
	 * During a task move, charges can be doubly counted. So, it's
	 * better to wait until the end of task_move if one is in progress.
	 */
	if (mem_cgroup_wait_acct_move(mem_over_limit))
		goto retry;

	if (nr_retries--)
		goto retry;

	if (gfp_mask & __GFP_NOFAIL)
		goto force;

	if (fatal_signal_pending(current))
		goto force;

	mem_cgroup_events(mem_over_limit, MEMCG_OOM, 1);

	mem_cgroup_oom(mem_over_limit, gfp_mask,
		       get_order(nr_pages * PAGE_SIZE));
nomem:
	if (!(gfp_mask & __GFP_NOFAIL))
		return -ENOMEM;
force:
	/*
	 * The allocation either can't fail or will lead to more memory
	 * being freed very soon.  Allow memory usage to go over the limit
	 * temporarily by force charging it.
	 */
	page_counter_charge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_charge(&memcg->memsw, nr_pages);
	css_get_many(&memcg->css, nr_pages);

	return 0;

done_restock:
	css_get_many(&memcg->css, batch);
	if (batch > nr_pages)
		refill_stock(memcg, batch - nr_pages);

	/*
	 * If the hierarchy is above the normal consumption range, schedule
	 * reclaim on returning to userland.  We can perform reclaim here
	 * if __GFP_RECLAIM is set, but let's always punt for simplicity and
	 * so that GFP_KERNEL can consistently be used during reclaim.  @memcg
	 * is not recorded as it most likely matches current's and won't
	 * change in the meantime.  As high limit is checked again before
	 * reclaim, the cost of mismatch is negligible.
	 */
	do {
		if (page_counter_read(&memcg->memory) > memcg->high) {
			/* Don't bother a random interrupted task */
			if (in_interrupt()) {
				schedule_work(&memcg->high_work);
				break;
			}
			current->memcg_nr_pages_over_high += batch;
			set_notify_resume(current);
			break;
		}
	} while ((memcg = parent_mem_cgroup(memcg)));

	return 0;
}

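/*
 * Undo a speculative charge: return @nr_pages to the page counters and
 * drop the css references that try_charge() took for them.
 */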
static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	if (mem_cgroup_is_root(memcg))
		return;

	page_counter_uncharge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_uncharge(&memcg->memsw, nr_pages);

	css_put_many(&memcg->css, nr_pages);
}

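/*
 * lock_page_lru()/unlock_page_lru() bracket commit_charge() in the
 * lrucare case: the page is isolated from its lruvec under zone_lru_lock
 * while page->mem_cgroup is rewritten, and put back afterwards.
 */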
static void lock_page_lru(struct page *page, int *isolated)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(zone_lru_lock(zone));
	if (PageLRU(page)) {
		struct lruvec *lruvec;

		lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
		ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_lru(page));
		*isolated = 1;
	} else
		*isolated = 0;
}

static void unlock_page_lru(struct page *page, int isolated)
{
	struct zone *zone = page_zone(page);

	if (isolated) {
		struct lruvec *lruvec;

		lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
		VM_BUG_ON_PAGE(PageLRU(page), page);
		SetPageLRU(page);
		add_page_to_lru_list(page, lruvec, page_lru(page));
	}
	spin_unlock_irq(zone_lru_lock(zone));
}

static void commit_charge(struct page *page, struct mem_cgroup *memcg,
			  bool lrucare)
{
	int isolated;

	VM_BUG_ON_PAGE(page->mem_cgroup, page);

	/*
	 * In some cases, e.g. SwapCache and FUSE (splice_buf->radixtree),
	 * the page may already be on some other mem_cgroup's LRU.  Take
	 * care of it.
	 */
	if (lrucare)
		lock_page_lru(page, &isolated);

	/*
	 * Nobody should be changing or seriously looking at
	 * page->mem_cgroup at this point:
	 *
	 * - the page is uncharged
	 *
	 * - the page is off-LRU
	 *
	 * - an anonymous fault has exclusive page access, except for
	 *   a locked page table
	 *
	 * - a page cache insertion, a swapin fault, or a migration
	 *   have the page locked
	 */
	page->mem_cgroup = memcg;

	if (lrucare)
		unlock_page_lru(page, isolated);
}

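/*
 * Kernel memory accounting: per-memcg kmem_cache creation and charging
 * of kmem pages.  Not available with the SLOB allocator.
 */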
#ifndef CONFIG_SLOB
2095
static int memcg_alloc_cache_id(void)
2096
{
2097 2098 2099
	int id, size;
	int err;

2100
	id = ida_simple_get(&memcg_cache_ida,
2101 2102 2103
			    0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
	if (id < 0)
		return id;
2104

2105
	if (id < memcg_nr_cache_ids)
2106 2107 2108 2109 2110 2111
		return id;

	/*
	 * There's no space for the new id in memcg_caches arrays,
	 * so we have to grow them.
	 */
2112
	down_write(&memcg_cache_ids_sem);
2113 2114

	size = 2 * (id + 1);
2115 2116 2117 2118 2119
	if (size < MEMCG_CACHES_MIN_SIZE)
		size = MEMCG_CACHES_MIN_SIZE;
	else if (size > MEMCG_CACHES_MAX_SIZE)
		size = MEMCG_CACHES_MAX_SIZE;

2120
	err = memcg_update_all_caches(size);
2121 2122
	if (!err)
		err = memcg_update_all_list_lrus(size);
2123 2124 2125 2126 2127
	if (!err)
		memcg_nr_cache_ids = size;

	up_write(&memcg_cache_ids_sem);

2128
	if (err) {
2129
		ida_simple_remove(&memcg_cache_ida, id);
2130 2131 2132 2133 2134 2135 2136
		return err;
	}
	return id;
}

static void memcg_free_cache_id(int id)
{
2137
	ida_simple_remove(&memcg_cache_ida, id);
2138 2139
}

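/*
 * Creation of a new per-memcg kmem_cache is deferred to a workqueue so
 * that it runs in a context that is allowed to block and allocate.
 */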
struct memcg_kmem_cache_create_work {
2141 2142 2143 2144 2145
	struct mem_cgroup *memcg;
	struct kmem_cache *cachep;
	struct work_struct work;
};

2146 2147
static struct workqueue_struct *memcg_kmem_cache_create_wq;

2148
static void memcg_kmem_cache_create_func(struct work_struct *w)
2149
{
2150 2151
	struct memcg_kmem_cache_create_work *cw =
		container_of(w, struct memcg_kmem_cache_create_work, work);
2152 2153
	struct mem_cgroup *memcg = cw->memcg;
	struct kmem_cache *cachep = cw->cachep;
2154

2155
	memcg_create_kmem_cache(memcg, cachep);
2156

2157
	css_put(&memcg->css);
2158 2159 2160 2161 2162 2163
	kfree(cw);
}

/*
 * Enqueue the creation of a per-memcg kmem_cache.
 */
2164 2165
static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
					       struct kmem_cache *cachep)
2166
{
2167
	struct memcg_kmem_cache_create_work *cw;
2168

2169
	cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
2170
	if (!cw)
2171
		return;
2172 2173

	css_get(&memcg->css);
2174 2175 2176

	cw->memcg = memcg;
	cw->cachep = cachep;
2177
	INIT_WORK(&cw->work, memcg_kmem_cache_create_func);
2178

2179
	queue_work(memcg_kmem_cache_create_wq, &cw->work);
2180 2181
}

2182 2183
static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
					     struct kmem_cache *cachep)
2184 2185 2186 2187
{
	/*
	 * We need to stop accounting when we kmalloc, because if the
	 * corresponding kmalloc cache is not yet created, the first allocation
2188
	 * in __memcg_schedule_kmem_cache_create will recurse.
2189 2190 2191 2192 2193 2194 2195
	 *
	 * However, it is better to enclose the whole function. Depending on
	 * the debugging options enabled, INIT_WORK(), for instance, can
	 * trigger an allocation. This too, will make us recurse. Because at
	 * this point we can't allow ourselves back into memcg_kmem_get_cache,
	 * the safest choice is to do it like this, wrapping the whole function.
	 */
2196
	current->memcg_kmem_skip_account = 1;
2197
	__memcg_schedule_kmem_cache_create(memcg, cachep);
2198
	current->memcg_kmem_skip_account = 0;
2199
}
2200

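/*
 * Allocations from interrupt context, from kernel threads and from tasks
 * without an mm bypass kmem accounting.
 */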
static inline bool memcg_kmem_bypass(void)
{
	if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
		return true;
	return false;
}

/**
 * memcg_kmem_get_cache: select the correct per-memcg cache for allocation
 * @cachep: the original global kmem cache
 *
2212 2213 2214
 * Return the kmem_cache we're supposed to use for a slab allocation.
 * We try to use the current memcg's version of the cache.
 *
2215 2216 2217
 * If the cache does not exist yet and we are the first user of it, we
 * create it asynchronously in a workqueue and let the current allocation
 * go through with the original cache.
2218
 *
2219 2220 2221 2222
 * This function takes a reference to the cache it returns to assure it
 * won't get destroyed while we are working with it. Once the caller is
 * done with it, memcg_kmem_put_cache() must be called to release the
 * reference.
2223
 */
2224
struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
2225 2226
{
	struct mem_cgroup *memcg;
2227
	struct kmem_cache *memcg_cachep;
2228
	int kmemcg_id;
2229

2230
	VM_BUG_ON(!is_root_cache(cachep));
2231

2232
	if (memcg_kmem_bypass())
		return cachep;

2235
	if (current->memcg_kmem_skip_account)
2236 2237
		return cachep;

2238
	memcg = get_mem_cgroup_from_mm(current->mm);
2239
	kmemcg_id = READ_ONCE(memcg->kmemcg_id);
2240
	if (kmemcg_id < 0)
2241
		goto out;
2242

2243
	memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
2244 2245
	if (likely(memcg_cachep))
		return memcg_cachep;
2246 2247 2248 2249 2250 2251 2252 2253 2254

	/*
	 * If we are in a safe context (can wait, and not in interrupt
	 * context), we could be predictable and return right away.
	 * This would guarantee that the allocation being performed
	 * already belongs in the new cache.
	 *
	 * However, there are some clashes that can arise from locking.
	 * For instance, because we acquire the slab_mutex while doing
	 * memcg_create_kmem_cache, this means no further allocation
	 * could happen with the slab_mutex held. So it's better to
	 * defer everything.
	 */
2259
	memcg_schedule_kmem_cache_create(memcg, cachep);
2260
out:
2261
	css_put(&memcg->css);
2262
	return cachep;
2263 2264
}

2265 2266 2267 2268 2269
/**
 * memcg_kmem_put_cache: drop reference taken by memcg_kmem_get_cache
 * @cachep: the cache returned by memcg_kmem_get_cache
 */
void memcg_kmem_put_cache(struct kmem_cache *cachep)
2270 2271
{
	if (!is_root_cache(cachep))
2272
		css_put(&cachep->memcg_params.memcg->css);
2273 2274
}

2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285
/**
 * memcg_kmem_charge: charge a kmem page
 * @page: page to charge
 * @gfp: reclaim mode
 * @order: allocation order
 * @memcg: memory cgroup to charge
 *
 * Returns 0 on success, an error code on failure.
 */
int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
			    struct mem_cgroup *memcg)
2286
{
2287 2288
	unsigned int nr_pages = 1 << order;
	struct page_counter *counter;
2289 2290
	int ret;

2291
	ret = try_charge(memcg, gfp, nr_pages);
2292
	if (ret)
2293
		return ret;
2294 2295 2296 2297 2298

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
	    !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
		cancel_charge(memcg, nr_pages);
		return -ENOMEM;
2299 2300
	}

2301
	page->mem_cgroup = memcg;
2302

2303
	return 0;
2304 2305
}

2306 2307 2308 2309 2310 2311 2312 2313 2314
/**
 * memcg_kmem_charge: charge a kmem page to the current memory cgroup
 * @page: page to charge
 * @gfp: reclaim mode
 * @order: allocation order
 *
 * Returns 0 on success, an error code on failure.
 */
int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
2315
{
2316
	struct mem_cgroup *memcg;
2317
	int ret = 0;
2318

2319 2320 2321
	if (memcg_kmem_bypass())
		return 0;

2322
	memcg = get_mem_cgroup_from_mm(current->mm);
2323
	if (!mem_cgroup_is_root(memcg)) {
2324
		ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
2325 2326 2327
		if (!ret)
			__SetPageKmemcg(page);
	}
2328
	css_put(&memcg->css);
2329
	return ret;
2330
}
2331 2332 2333 2334 2335 2336
/**
 * memcg_kmem_uncharge: uncharge a kmem page
 * @page: page to uncharge
 * @order: allocation order
 */
void memcg_kmem_uncharge(struct page *page, int order)
2337
{
2338
	struct mem_cgroup *memcg = page->mem_cgroup;
2339
	unsigned int nr_pages = 1 << order;
2340 2341 2342 2343

	if (!memcg)
		return;

2344
	VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
2345

2346 2347 2348
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		page_counter_uncharge(&memcg->kmem, nr_pages);

2349
	page_counter_uncharge(&memcg->memory, nr_pages);
2350
	if (do_memsw_account())
2351
		page_counter_uncharge(&memcg->memsw, nr_pages);
2352

2353
	page->mem_cgroup = NULL;
2354 2355 2356 2357 2358

	/* slab pages do not have PageKmemcg flag set */
	if (PageKmemcg(page))
		__ClearPageKmemcg(page);

2359
	css_put_many(&memcg->css, nr_pages);
2360
}
2361
#endif /* !CONFIG_SLOB */
2362

2363 2364 2365 2366
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * Because tail pages are not marked as "used", set page->mem_cgroup on
 * them here.  We're under zone_lru_lock and migration entries are set up
 * in all page mappings.
 */
2369
void mem_cgroup_split_huge_fixup(struct page *head)
2370
{
2371
	int i;
2372

2373 2374
	if (mem_cgroup_disabled())
		return;
2375

2376
	for (i = 1; i < HPAGE_PMD_NR; i++)
2377
		head[i].mem_cgroup = head->mem_cgroup;
2378

2379
	__this_cpu_sub(head->mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
2380
		       HPAGE_PMD_NR);
2381
}
2382
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2383

#ifdef CONFIG_MEMCG_SWAP
2385 2386
static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
					 bool charge)
{
2388 2389
	int val = (charge) ? 1 : -1;
	this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
}
2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402

/**
 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
 * @entry: swap entry to be moved
 * @from:  mem_cgroup which the entry is moved from
 * @to:  mem_cgroup which the entry is moved to
 *
 * It succeeds only when the swap_cgroup's record for this entry is the same
 * as the mem_cgroup's id of @from.
 *
 * Returns 0 on success, -EINVAL on failure.
 *
2403
 * The caller must have charged to @to, IOW, called page_counter_charge() about
2404 2405 2406
 * both res and memsw, and called css_get().
 */
static int mem_cgroup_move_swap_account(swp_entry_t entry,
2407
				struct mem_cgroup *from, struct mem_cgroup *to)
2408 2409 2410
{
	unsigned short old_id, new_id;

	old_id = mem_cgroup_id(from);
	new_id = mem_cgroup_id(to);
2413 2414 2415

	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
		mem_cgroup_swap_statistics(from, false);
2416
		mem_cgroup_swap_statistics(to, true);
2417 2418 2419 2420 2421 2422
		return 0;
	}
	return -EINVAL;
}
#else
static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
2423
				struct mem_cgroup *from, struct mem_cgroup *to)
2424 2425 2426
{
	return -EINVAL;
}
2427
#endif

2429
static DEFINE_MUTEX(memcg_limit_mutex);
2430

2431
static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
2432
				   unsigned long limit)
2433
{
2434 2435 2436
	unsigned long curusage;
	unsigned long oldusage;
	bool enlarge = false;
2437
	int retry_count;
2438
	int ret;
2439 2440 2441 2442 2443 2444

	/*
	 * To keep hierarchical_reclaim simple, how long we should retry
	 * depends on the caller. We set our retry-count to be a function
	 * of the number of children which we should visit in this loop.
	 */
	retry_count = MEM_CGROUP_RECLAIM_RETRIES *
		      mem_cgroup_count_children(memcg);
2447

2448
	oldusage = page_counter_read(&memcg->memory);
2449

2450
	do {
2451 2452 2453 2454
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
2455 2456 2457 2458

		mutex_lock(&memcg_limit_mutex);
		if (limit > memcg->memsw.limit) {
			mutex_unlock(&memcg_limit_mutex);
2459
			ret = -EINVAL;
2460 2461
			break;
		}
2462 2463 2464 2465
		if (limit > memcg->memory.limit)
			enlarge = true;
		ret = page_counter_limit(&memcg->memory, limit);
		mutex_unlock(&memcg_limit_mutex);
2466 2467 2468 2469

		if (!ret)
			break;

2470 2471
		try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true);

2472
		curusage = page_counter_read(&memcg->memory);
2473
		/* Usage is reduced ? */
		if (curusage >= oldusage)
2475 2476 2477
			retry_count--;
		else
			oldusage = curusage;
2478 2479
	} while (retry_count);

2480 2481
	if (!ret && enlarge)
		memcg_oom_recover(memcg);
2482

2483 2484 2485
	return ret;
}

static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
2487
					 unsigned long limit)
2488
{
2489 2490 2491
	unsigned long curusage;
	unsigned long oldusage;
	bool enlarge = false;
2492
	int retry_count;
2493
	int ret;
2494

2495
	/* see mem_cgroup_resize_limit */
2496 2497 2498 2499 2500 2501
	retry_count = MEM_CGROUP_RECLAIM_RETRIES *
		      mem_cgroup_count_children(memcg);

	oldusage = page_counter_read(&memcg->memsw);

	do {
2502 2503 2504 2505
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
2506 2507 2508 2509

		mutex_lock(&memcg_limit_mutex);
		if (limit < memcg->memory.limit) {
			mutex_unlock(&memcg_limit_mutex);
2510 2511 2512
			ret = -EINVAL;
			break;
		}
2513 2514 2515 2516
		if (limit > memcg->memsw.limit)
			enlarge = true;
		ret = page_counter_limit(&memcg->memsw, limit);
		mutex_unlock(&memcg_limit_mutex);
2517 2518 2519 2520

		if (!ret)
			break;

2521 2522
		try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false);

2523
		curusage = page_counter_read(&memcg->memsw);
2524
		/* Usage is reduced ? */
2525
		if (curusage >= oldusage)
2526
			retry_count--;
2527 2528
		else
			oldusage = curusage;
2529 2530
	} while (retry_count);

2531 2532
	if (!ret && enlarge)
		memcg_oom_recover(memcg);
2533

2534 2535 2536
	return ret;
}

2537
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
2538 2539 2540 2541
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	unsigned long nr_reclaimed = 0;
2542
	struct mem_cgroup_per_node *mz, *next_mz = NULL;
2543 2544
	unsigned long reclaimed;
	int loop = 0;
2545
	struct mem_cgroup_tree_per_node *mctz;
2546
	unsigned long excess;
2547 2548 2549 2550 2551
	unsigned long nr_scanned;

	if (order > 0)
		return 0;

2552
	mctz = soft_limit_tree_node(pgdat->node_id);
2553 2554 2555 2556 2557 2558 2559 2560 2561

	/*
	 * Do not even bother to check the largest node if the root
	 * is empty. Do it lockless to prevent lock bouncing. Races
	 * are acceptable as soft limit is best effort anyway.
	 */
	if (RB_EMPTY_ROOT(&mctz->rb_root))
		return 0;

2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574 2575
	/*
	 * This loop can run for a while, especially if memcgs continuously
	 * keep exceeding their soft limit and putting the system under
	 * pressure.
	 */
	do {
		if (next_mz)
			mz = next_mz;
		else
			mz = mem_cgroup_largest_soft_limit_node(mctz);
		if (!mz)
			break;

		nr_scanned = 0;
2576
		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
2577 2578 2579
						    gfp_mask, &nr_scanned);
		nr_reclaimed += reclaimed;
		*total_scanned += nr_scanned;
2580
		spin_lock_irq(&mctz->lock);
2581
		__mem_cgroup_remove_exceeded(mz, mctz);
2582 2583 2584 2585 2586 2587

		/*
		 * If we failed to reclaim anything from this memory cgroup
		 * it is time to move on to the next cgroup
		 */
		next_mz = NULL;
2588 2589 2590
		if (!reclaimed)
			next_mz = __mem_cgroup_largest_soft_limit_node(mctz);

2591
		excess = soft_limit_excess(mz->memcg);
2592 2593 2594 2595 2596 2597 2598 2599 2600
		/*
		 * One school of thought says that we should not add
		 * back the node to the tree if reclaim returns 0.
		 * But our reclaim could return 0, simply because due
		 * to priority we are exposing a smaller subset of
		 * memory to reclaim from. Consider this as a longer
		 * term TODO.
		 */
		/* If excess == 0, no tree ops */
2601
		__mem_cgroup_insert_exceeded(mz, mctz, excess);
2602
		spin_unlock_irq(&mctz->lock);
2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619
		css_put(&mz->memcg->css);
		loop++;
		/*
		 * Could not reclaim anything and there are no more
		 * mem cgroups to try or we seem to be looping without
		 * reclaiming anything.
		 */
		if (!nr_reclaimed &&
			(next_mz == NULL ||
			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
			break;
	} while (!nr_reclaimed);
	if (next_mz)
		css_put(&next_mz->memcg->css);
	return nr_reclaimed;
}

2620 2621 2622 2623 2624 2625
/*
 * Test whether @memcg has children, dead or alive.  Note that this
 * function doesn't care whether @memcg has use_hierarchy enabled and
 * returns %true if there are child csses according to the cgroup
 * hierarchy.  Testing use_hierarchy is the caller's responsibility.
 */
2626 2627
static inline bool memcg_has_children(struct mem_cgroup *memcg)
{
2628 2629 2630 2631 2632 2633
	bool ret;

	rcu_read_lock();
	ret = css_next_child(NULL, &memcg->css);
	rcu_read_unlock();
	return ret;
2634 2635
}

2636
/*
2637
 * Reclaims as many pages from the given memcg as possible.
2638 2639 2640 2641 2642 2643 2644
 *
 * Caller is responsible for holding css reference for memcg.
 */
static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
{
	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;

2645 2646
	/* we call try-to-free pages to make this cgroup empty */
	lru_add_drain_all();
2647
	/* try to free all pages in this cgroup */
2648
	while (nr_retries && page_counter_read(&memcg->memory)) {
2649
		int progress;
2650

2651 2652 2653
		if (signal_pending(current))
			return -EINTR;

2654 2655
		progress = try_to_free_mem_cgroup_pages(memcg, 1,
							GFP_KERNEL, true);
2656
		if (!progress) {
2657
			nr_retries--;
2658
			/* maybe some writeback is necessary */
2659
			congestion_wait(BLK_RW_ASYNC, HZ/10);
2660
		}
2661 2662

	}
2663 2664

	return 0;
2665 2666
}

2667 2668 2669
static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
					    char *buf, size_t nbytes,
					    loff_t off)
2670
{
2671
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
2672

2673 2674
	if (mem_cgroup_is_root(memcg))
		return -EINVAL;
2675
	return mem_cgroup_force_empty(memcg) ?: nbytes;
2676 2677
}

2678 2679
static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
				     struct cftype *cft)
2680
{
2681
	return mem_cgroup_from_css(css)->use_hierarchy;
2682 2683
}

2684 2685
static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
				      struct cftype *cft, u64 val)
2686 2687
{
	int retval = 0;
2688
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
2690

2691
	if (memcg->use_hierarchy == val)
2692
		return 0;
2693

2694
	/*
2695
	 * If parent's use_hierarchy is set, we can't make any modifications
2696 2697 2698 2699 2700 2701
	 * in the child subtrees. If it is unset, then the change can
	 * occur, provided the current cgroup has no children.
	 *
	 * For the root cgroup, parent_mem is NULL, we allow value to be
	 * set if there are no children.
	 */
2702
	if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
2703
				(val == 1 || val == 0)) {
2704
		if (!memcg_has_children(memcg))
2705
			memcg->use_hierarchy = val;
2706 2707 2708 2709
		else
			retval = -EBUSY;
	} else
		retval = -EINVAL;
2710

2711 2712 2713
	return retval;
}

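/* Sum each statistics counter over @memcg and all of its descendants. */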
static void tree_stat(struct mem_cgroup *memcg, unsigned long *stat)
2715 2716
{
	struct mem_cgroup *iter;
2717
	int i;
2718

2719
	memset(stat, 0, sizeof(*stat) * MEMCG_NR_STAT);
2720

2721 2722 2723 2724
	for_each_mem_cgroup_tree(iter, memcg) {
		for (i = 0; i < MEMCG_NR_STAT; i++)
			stat[i] += mem_cgroup_read_stat(iter, i);
	}
2725 2726
}

2727
static void tree_events(struct mem_cgroup *memcg, unsigned long *events)
2728 2729
{
	struct mem_cgroup *iter;
2730
	int i;
2731

2732
	memset(events, 0, sizeof(*events) * MEMCG_NR_EVENTS);
2733

2734 2735 2736 2737
	for_each_mem_cgroup_tree(iter, memcg) {
		for (i = 0; i < MEMCG_NR_EVENTS; i++)
			events[i] += mem_cgroup_read_events(iter, i);
	}
2738 2739
}

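/*
 * The root cgroup's page counters are not used for accounting, so its
 * usage is derived from the hierarchical statistics instead of from
 * page_counter_read().
 */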
static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
2741
{
2742
	unsigned long val = 0;
2743

2744
	if (mem_cgroup_is_root(memcg)) {
2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755
		struct mem_cgroup *iter;

		for_each_mem_cgroup_tree(iter, memcg) {
			val += mem_cgroup_read_stat(iter,
					MEM_CGROUP_STAT_CACHE);
			val += mem_cgroup_read_stat(iter,
					MEM_CGROUP_STAT_RSS);
			if (swap)
				val += mem_cgroup_read_stat(iter,
						MEM_CGROUP_STAT_SWAP);
		}
2756
	} else {
2757
		if (!swap)
2758
			val = page_counter_read(&memcg->memory);
2759
		else
2760
			val = page_counter_read(&memcg->memsw);
2761
	}
2762
	return val;
2763 2764
}

2765 2766 2767 2768 2769 2770 2771
enum {
	RES_USAGE,
	RES_LIMIT,
	RES_MAX_USAGE,
	RES_FAILCNT,
	RES_SOFT_LIMIT,
};
2772

2773
static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
2774
			       struct cftype *cft)
{
2776
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
2777
	struct page_counter *counter;
2778

2779
	switch (MEMFILE_TYPE(cft->private)) {
2780
	case _MEM:
2781 2782
		counter = &memcg->memory;
		break;
2783
	case _MEMSWAP:
2784 2785
		counter = &memcg->memsw;
		break;
2786
	case _KMEM:
2787
		counter = &memcg->kmem;
2788
		break;
	case _TCP:
2790
		counter = &memcg->tcpmem;
		break;
2792 2793 2794
	default:
		BUG();
	}
2795 2796 2797 2798

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_USAGE:
		if (counter == &memcg->memory)
2799
			return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
2800
		if (counter == &memcg->memsw)
2801
			return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
2802 2803 2804 2805 2806 2807 2808 2809 2810 2811 2812 2813
		return (u64)page_counter_read(counter) * PAGE_SIZE;
	case RES_LIMIT:
		return (u64)counter->limit * PAGE_SIZE;
	case RES_MAX_USAGE:
		return (u64)counter->watermark * PAGE_SIZE;
	case RES_FAILCNT:
		return counter->failcnt;
	case RES_SOFT_LIMIT:
		return (u64)memcg->soft_limit * PAGE_SIZE;
	default:
		BUG();
	}
}
2815

2816
#ifndef CONFIG_SLOB
2817
static int memcg_online_kmem(struct mem_cgroup *memcg)
2818 2819 2820
{
	int memcg_id;

2821 2822 2823
	if (cgroup_memory_nokmem)
		return 0;

2824
	BUG_ON(memcg->kmemcg_id >= 0);
2825
	BUG_ON(memcg->kmem_state);
2826

2827
	memcg_id = memcg_alloc_cache_id();
2828 2829
	if (memcg_id < 0)
		return memcg_id;
2830

2831
	static_branch_inc(&memcg_kmem_enabled_key);
2832
	/*
2833
	 * A memory cgroup is considered kmem-online as soon as it gets
	 * kmemcg_id. Setting the id after enabling static branching will
2835 2836 2837
	 * guarantee no one starts accounting before all call sites are
	 * patched.
	 */
	memcg->kmemcg_id = memcg_id;
2839
	memcg->kmem_state = KMEM_ONLINE;
2840
	INIT_LIST_HEAD(&memcg->kmem_caches);
2841 2842

	return 0;
2843 2844
}

2845 2846 2847 2848 2849 2850 2851 2852 2853 2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877
static void memcg_offline_kmem(struct mem_cgroup *memcg)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *parent, *child;
	int kmemcg_id;

	if (memcg->kmem_state != KMEM_ONLINE)
		return;
	/*
	 * Clear the online state before clearing memcg_caches array
	 * entries. The slab_mutex in memcg_deactivate_kmem_caches()
	 * guarantees that no cache will be created for this cgroup
	 * after we are done (see memcg_create_kmem_cache()).
	 */
	memcg->kmem_state = KMEM_ALLOCATED;

	memcg_deactivate_kmem_caches(memcg);

	kmemcg_id = memcg->kmemcg_id;
	BUG_ON(kmemcg_id < 0);

	parent = parent_mem_cgroup(memcg);
	if (!parent)
		parent = root_mem_cgroup;

	/*
	 * Change kmemcg_id of this cgroup and all its descendants to the
	 * parent's id, and then move all entries from this cgroup's list_lrus
	 * to ones of the parent. After we have finished, all list_lrus
	 * corresponding to this cgroup are guaranteed to remain empty. The
	 * ordering is imposed by list_lru_node->lock taken by
	 * memcg_drain_all_list_lrus().
	 */
2878
	rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
2879 2880 2881 2882 2883 2884 2885
	css_for_each_descendant_pre(css, &memcg->css) {
		child = mem_cgroup_from_css(css);
		BUG_ON(child->kmemcg_id != kmemcg_id);
		child->kmemcg_id = parent->kmemcg_id;
		if (!memcg->use_hierarchy)
			break;
	}
2886 2887
	rcu_read_unlock();

2888 2889 2890 2891 2892 2893 2894
	memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);

	memcg_free_cache_id(kmemcg_id);
}

static void memcg_free_kmem(struct mem_cgroup *memcg)
{
2895 2896 2897 2898
	/* css_alloc() failed, offlining didn't happen */
	if (unlikely(memcg->kmem_state == KMEM_ONLINE))
		memcg_offline_kmem(memcg);

2899 2900 2901 2902 2903 2904
	if (memcg->kmem_state == KMEM_ALLOCATED) {
		memcg_destroy_kmem_caches(memcg);
		static_branch_dec(&memcg_kmem_enabled_key);
		WARN_ON(page_counter_read(&memcg->kmem));
	}
}
2905
#else
2906
static int memcg_online_kmem(struct mem_cgroup *memcg)
2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917
{
	return 0;
}
static void memcg_offline_kmem(struct mem_cgroup *memcg)
{
}
static void memcg_free_kmem(struct mem_cgroup *memcg)
{
}
#endif /* !CONFIG_SLOB */

2918
static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
2919
				   unsigned long limit)
2920
{
2921
	int ret;
2922 2923 2924 2925 2926

	mutex_lock(&memcg_limit_mutex);
	ret = page_counter_limit(&memcg->kmem, limit);
	mutex_unlock(&memcg_limit_mutex);
	return ret;
2927
}
2928

V
Vladimir Davydov 已提交
2929 2930 2931 2932 2933 2934
static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
{
	int ret;

	mutex_lock(&memcg_limit_mutex);

2935
	ret = page_counter_limit(&memcg->tcpmem, limit);
V
Vladimir Davydov 已提交
2936 2937 2938
	if (ret)
		goto out;

2939
	if (!memcg->tcpmem_active) {
V
Vladimir Davydov 已提交
2940 2941 2942
		/*
		 * The active flag needs to be written after the static_key
		 * update. This is what guarantees that the socket activation
2943 2944 2945
		 * function is the last one to run. See mem_cgroup_sk_alloc()
		 * for details, and note that we don't mark any socket as
		 * belonging to this memcg until that flag is up.
V
Vladimir Davydov 已提交
2946 2947 2948 2949 2950 2951
		 *
		 * We need to do this, because static_keys will span multiple
		 * sites, but we can't control their order. If we mark a socket
		 * as accounted, but the accounting functions are not patched in
		 * yet, we'll lose accounting.
		 *
2952
		 * We never race with the readers in mem_cgroup_sk_alloc(),
V
Vladimir Davydov 已提交
2953 2954 2955 2956
		 * because when this value change, the code to process it is not
		 * patched in yet.
		 */
		static_branch_inc(&memcg_sockets_enabled_key);
2957
		memcg->tcpmem_active = true;
V
Vladimir Davydov 已提交
2958 2959 2960 2961 2962 2963
	}
out:
	mutex_unlock(&memcg_limit_mutex);
	return ret;
}

2964 2965 2966 2967
/*
 * The user of this function is...
 * RES_LIMIT.
 */
2968 2969
static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
B
Balbir Singh 已提交
2970
{
2971
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
2972
	unsigned long nr_pages;
2973 2974
	int ret;

2975
	buf = strstrip(buf);
2976
	ret = page_counter_memparse(buf, "-1", &nr_pages);
2977 2978
	if (ret)
		return ret;
2979

2980
	switch (MEMFILE_ATTR(of_cft(of)->private)) {
2981
	case RES_LIMIT:
2982 2983 2984 2985
		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
			ret = -EINVAL;
			break;
		}
2986 2987 2988
		switch (MEMFILE_TYPE(of_cft(of)->private)) {
		case _MEM:
			ret = mem_cgroup_resize_limit(memcg, nr_pages);
2989
			break;
2990 2991
		case _MEMSWAP:
			ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages);
2992
			break;
2993 2994 2995
		case _KMEM:
			ret = memcg_update_kmem_limit(memcg, nr_pages);
			break;
V
Vladimir Davydov 已提交
2996 2997 2998
		case _TCP:
			ret = memcg_update_tcp_limit(memcg, nr_pages);
			break;
2999
		}
3000
		break;
3001 3002 3003
	case RES_SOFT_LIMIT:
		memcg->soft_limit = nr_pages;
		ret = 0;
3004 3005
		break;
	}
3006
	return ret ?: nbytes;
B
Balbir Singh 已提交
3007 3008
}

3009 3010
static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
				size_t nbytes, loff_t off)
3011
{
3012
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3013
	struct page_counter *counter;
3014

3015 3016 3017 3018 3019 3020 3021 3022 3023 3024
	switch (MEMFILE_TYPE(of_cft(of)->private)) {
	case _MEM:
		counter = &memcg->memory;
		break;
	case _MEMSWAP:
		counter = &memcg->memsw;
		break;
	case _KMEM:
		counter = &memcg->kmem;
		break;
V
Vladimir Davydov 已提交
3025
	case _TCP:
3026
		counter = &memcg->tcpmem;
V
Vladimir Davydov 已提交
3027
		break;
3028 3029 3030
	default:
		BUG();
	}
3031

3032
	switch (MEMFILE_ATTR(of_cft(of)->private)) {
3033
	case RES_MAX_USAGE:
3034
		page_counter_reset_watermark(counter);
3035 3036
		break;
	case RES_FAILCNT:
3037
		counter->failcnt = 0;
3038
		break;
3039 3040
	default:
		BUG();
3041
	}
3042

3043
	return nbytes;
3044 3045
}

3046
static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3047 3048
					struct cftype *cft)
{
3049
	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3050 3051
}

3052
#ifdef CONFIG_MMU
3053
static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3054 3055
					struct cftype *cft, u64 val)
{
3056
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3057

3058
	if (val & ~MOVE_MASK)
3059
		return -EINVAL;
3060

3061
	/*
3062 3063 3064 3065
	 * No kind of locking is needed in here, because ->can_attach() will
	 * check this value once at the beginning of the process, and then carry
	 * on with stale data. This means that changes to this value will only
	 * affect task migrations starting after the change.
3066
	 */
3067
	memcg->move_charge_at_immigrate = val;
3068 3069
	return 0;
}
3070
#else
3071
static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3072 3073 3074 3075 3076
					struct cftype *cft, u64 val)
{
	return -ENOSYS;
}
#endif
3077

3078
#ifdef CONFIG_NUMA
3079
static int memcg_numa_stat_show(struct seq_file *m, void *v)
3080
{
3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092
	struct numa_stat {
		const char *name;
		unsigned int lru_mask;
	};

	static const struct numa_stat stats[] = {
		{ "total", LRU_ALL },
		{ "file", LRU_ALL_FILE },
		{ "anon", LRU_ALL_ANON },
		{ "unevictable", BIT(LRU_UNEVICTABLE) },
	};
	const struct numa_stat *stat;
3093
	int nid;
3094
	unsigned long nr;
3095
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
3096

3097 3098 3099 3100 3101 3102 3103 3104 3105
	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
		nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
		seq_printf(m, "%s=%lu", stat->name, nr);
		for_each_node_state(nid, N_MEMORY) {
			nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
							  stat->lru_mask);
			seq_printf(m, " N%d=%lu", nid, nr);
		}
		seq_putc(m, '\n');
3106 3107
	}

3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122
	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
		struct mem_cgroup *iter;

		nr = 0;
		for_each_mem_cgroup_tree(iter, memcg)
			nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
		seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
		for_each_node_state(nid, N_MEMORY) {
			nr = 0;
			for_each_mem_cgroup_tree(iter, memcg)
				nr += mem_cgroup_node_nr_lru_pages(
					iter, nid, stat->lru_mask);
			seq_printf(m, " N%d=%lu", nid, nr);
		}
		seq_putc(m, '\n');
3123 3124 3125 3126 3127 3128
	}

	return 0;
}
#endif /* CONFIG_NUMA */

3129
static int memcg_stat_show(struct seq_file *m, void *v)
3130
{
3131
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
3132
	unsigned long memory, memsw;
3133 3134
	struct mem_cgroup *mi;
	unsigned int i;
3135

3136 3137 3138 3139
	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_stat_names) !=
		     MEM_CGROUP_STAT_NSTATS);
	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_events_names) !=
		     MEM_CGROUP_EVENTS_NSTATS);
3140 3141
	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);

3142
	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
3143
		if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
3144
			continue;
3145
		seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i],
3146
			   mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
3147
	}
L
Lee Schermerhorn 已提交
3148

3149 3150 3151 3152 3153 3154 3155 3156
	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
		seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
			   mem_cgroup_read_events(memcg, i));

	for (i = 0; i < NR_LRU_LISTS; i++)
		seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
			   mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);

K
KAMEZAWA Hiroyuki 已提交
3157
	/* Hierarchical information */
3158 3159 3160 3161
	memory = memsw = PAGE_COUNTER_MAX;
	for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
		memory = min(memory, mi->memory.limit);
		memsw = min(memsw, mi->memsw.limit);
3162
	}
3163 3164
	seq_printf(m, "hierarchical_memory_limit %llu\n",
		   (u64)memory * PAGE_SIZE);
3165
	if (do_memsw_account())
3166 3167
		seq_printf(m, "hierarchical_memsw_limit %llu\n",
			   (u64)memsw * PAGE_SIZE);
K
KOSAKI Motohiro 已提交
3168

3169
	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
3170
		unsigned long long val = 0;
3171

3172
		if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
3173
			continue;
3174 3175
		for_each_mem_cgroup_tree(mi, memcg)
			val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
3176
		seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val);
3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193
	}

	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
		unsigned long long val = 0;

		for_each_mem_cgroup_tree(mi, memcg)
			val += mem_cgroup_read_events(mi, i);
		seq_printf(m, "total_%s %llu\n",
			   mem_cgroup_events_names[i], val);
	}

	for (i = 0; i < NR_LRU_LISTS; i++) {
		unsigned long long val = 0;

		for_each_mem_cgroup_tree(mi, memcg)
			val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
		seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
3194
	}
K
KAMEZAWA Hiroyuki 已提交
3195

K
KOSAKI Motohiro 已提交
3196 3197
#ifdef CONFIG_DEBUG_VM
	{
3198 3199
		pg_data_t *pgdat;
		struct mem_cgroup_per_node *mz;
3200
		struct zone_reclaim_stat *rstat;
K
KOSAKI Motohiro 已提交
3201 3202 3203
		unsigned long recent_rotated[2] = {0, 0};
		unsigned long recent_scanned[2] = {0, 0};

3204 3205 3206
		for_each_online_pgdat(pgdat) {
			mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
			rstat = &mz->lruvec.reclaim_stat;
K
KOSAKI Motohiro 已提交
3207

3208 3209 3210 3211 3212
			recent_rotated[0] += rstat->recent_rotated[0];
			recent_rotated[1] += rstat->recent_rotated[1];
			recent_scanned[0] += rstat->recent_scanned[0];
			recent_scanned[1] += rstat->recent_scanned[1];
		}
3213 3214 3215 3216
		seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
		seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
		seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
		seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
K
KOSAKI Motohiro 已提交
3217 3218 3219
	}
#endif

3220 3221 3222
	return 0;
}

3223 3224
static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
K
KOSAKI Motohiro 已提交
3225
{
3226
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
K
KOSAKI Motohiro 已提交
3227

3228
	return mem_cgroup_swappiness(memcg);
K
KOSAKI Motohiro 已提交
3229 3230
}

3231 3232
static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
K
KOSAKI Motohiro 已提交
3233
{
3234
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
K
KOSAKI Motohiro 已提交
3235

3236
	if (val > 100)
K
KOSAKI Motohiro 已提交
3237 3238
		return -EINVAL;

3239
	if (css->parent)
3240 3241 3242
		memcg->swappiness = val;
	else
		vm_swappiness = val;
3243

K
KOSAKI Motohiro 已提交
3244 3245 3246
	return 0;
}

3247 3248 3249
static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
{
	struct mem_cgroup_threshold_ary *t;
3250
	unsigned long usage;
3251 3252 3253 3254
	int i;

	rcu_read_lock();
	if (!swap)
3255
		t = rcu_dereference(memcg->thresholds.primary);
3256
	else
3257
		t = rcu_dereference(memcg->memsw_thresholds.primary);
3258 3259 3260 3261

	if (!t)
		goto unlock;

3262
	usage = mem_cgroup_usage(memcg, swap);
3263 3264

	/*
3265
	 * current_threshold points to the threshold just below or equal to
	 * usage.  If that is not the case, a threshold was crossed after the
	 * last call of __mem_cgroup_threshold().
	 */
3269
	i = t->current_threshold;
3270 3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292

	/*
	 * Iterate backward over array of thresholds starting from
	 * current_threshold and check if a threshold is crossed.
	 * If none of thresholds below usage is crossed, we read
	 * only one element of the array here.
	 */
	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
		eventfd_signal(t->entries[i].eventfd, 1);

	/* i = current_threshold + 1 */
	i++;

	/*
	 * Iterate forward over array of thresholds starting from
	 * current_threshold+1 and check if a threshold is crossed.
	 * If none of thresholds above usage is crossed, we read
	 * only one element of the array here.
	 */
	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
		eventfd_signal(t->entries[i].eventfd, 1);

	/* Update current_threshold */
3293
	t->current_threshold = i - 1;
3294 3295 3296 3297 3298 3299
unlock:
	rcu_read_unlock();
}

static void mem_cgroup_threshold(struct mem_cgroup *memcg)
{
3300 3301
	while (memcg) {
		__mem_cgroup_threshold(memcg, false);
3302
		if (do_memsw_account())
3303 3304 3305 3306
			__mem_cgroup_threshold(memcg, true);

		memcg = parent_mem_cgroup(memcg);
	}
3307 3308 3309 3310 3311 3312 3313
}

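/* sort() callback: order thresholds by ascending threshold value. */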
static int compare_thresholds(const void *a, const void *b)
{
	const struct mem_cgroup_threshold *_a = a;
	const struct mem_cgroup_threshold *_b = b;

3314 3315 3316 3317 3318 3319 3320
	if (_a->threshold > _b->threshold)
		return 1;

	if (_a->threshold < _b->threshold)
		return -1;

	return 0;
3321 3322
}

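/* Signal every eventfd registered for OOM notification on @memcg. */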
static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
K
KAMEZAWA Hiroyuki 已提交
3324 3325 3326
{
	struct mem_cgroup_eventfd_list *ev;

3327 3328
	spin_lock(&memcg_oom_lock);

3329
	list_for_each_entry(ev, &memcg->oom_notify, list)
K
KAMEZAWA Hiroyuki 已提交
3330
		eventfd_signal(ev->eventfd, 1);
3331 3332

	spin_unlock(&memcg_oom_lock);
K
KAMEZAWA Hiroyuki 已提交
3333 3334 3335
	return 0;
}

3336
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
K
KAMEZAWA Hiroyuki 已提交
3337
{
K
KAMEZAWA Hiroyuki 已提交
3338 3339
	struct mem_cgroup *iter;

3340
	for_each_mem_cgroup_tree(iter, memcg)
K
KAMEZAWA Hiroyuki 已提交
3341
		mem_cgroup_oom_notify_cb(iter);
K
KAMEZAWA Hiroyuki 已提交
3342 3343
}

3344
static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
T
Tejun Heo 已提交
3345
	struct eventfd_ctx *eventfd, const char *args, enum res_type type)
3346
{
3347 3348
	struct mem_cgroup_thresholds *thresholds;
	struct mem_cgroup_threshold_ary *new;
3349 3350
	unsigned long threshold;
	unsigned long usage;
3351
	int i, size, ret;
3352

3353
	ret = page_counter_memparse(args, "-1", &threshold);
3354 3355 3356 3357
	if (ret)
		return ret;

	mutex_lock(&memcg->thresholds_lock);
3358

3359
	if (type == _MEM) {
3360
		thresholds = &memcg->thresholds;
3361
		usage = mem_cgroup_usage(memcg, false);
3362
	} else if (type == _MEMSWAP) {
3363
		thresholds = &memcg->memsw_thresholds;
3364
		usage = mem_cgroup_usage(memcg, true);
3365
	} else
3366 3367 3368
		BUG();

	/* Check if a threshold crossed before adding a new one */
3369
	if (thresholds->primary)
3370 3371
		__mem_cgroup_threshold(memcg, type == _MEMSWAP);

3372
	size = thresholds->primary ? thresholds->primary->size + 1 : 1;
3373 3374

	/* Allocate memory for new array of thresholds */
3375
	new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
3376
			GFP_KERNEL);
3377
	if (!new) {
3378 3379 3380
		ret = -ENOMEM;
		goto unlock;
	}
3381
	new->size = size;
3382 3383

	/* Copy thresholds (if any) to new array */
3384 3385
	if (thresholds->primary) {
		memcpy(new->entries, thresholds->primary->entries, (size - 1) *
3386
				sizeof(struct mem_cgroup_threshold));
3387 3388
	}

3389
	/* Add new threshold */
3390 3391
	new->entries[size - 1].eventfd = eventfd;
	new->entries[size - 1].threshold = threshold;
3392 3393

	/* Sort thresholds. Registering of new threshold isn't time-critical */
3394
	sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
3395 3396 3397
			compare_thresholds, NULL);

	/* Find current threshold */
3398
	new->current_threshold = -1;
3399
	for (i = 0; i < size; i++) {
3400
		if (new->entries[i].threshold <= usage) {
3401
			/*
3402 3403
			 * new->current_threshold will not be used until
			 * rcu_assign_pointer(), so it's safe to increment
3404 3405
			 * it here.
			 */
3406
			++new->current_threshold;
3407 3408
		} else
			break;
3409 3410
	}

3411 3412 3413 3414 3415
	/* Free old spare buffer and save old primary buffer as spare */
	kfree(thresholds->spare);
	thresholds->spare = thresholds->primary;

	rcu_assign_pointer(thresholds->primary, new);
3416

3417
	/* To be sure that nobody uses thresholds */
3418 3419 3420 3421 3422 3423 3424 3425
	synchronize_rcu();

unlock:
	mutex_unlock(&memcg->thresholds_lock);

	return ret;
}

3426
static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
T
Tejun Heo 已提交
3427 3428
	struct eventfd_ctx *eventfd, const char *args)
{
3429
	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
T
Tejun Heo 已提交
3430 3431
}

3432
static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
T
Tejun Heo 已提交
3433 3434
	struct eventfd_ctx *eventfd, const char *args)
{
3435
	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
T
Tejun Heo 已提交
3436 3437
}

3438
static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
T
Tejun Heo 已提交
3439
	struct eventfd_ctx *eventfd, enum res_type type)
3440
{
3441 3442
	struct mem_cgroup_thresholds *thresholds;
	struct mem_cgroup_threshold_ary *new;
3443
	unsigned long usage;
3444
	int i, j, size;
3445 3446

	mutex_lock(&memcg->thresholds_lock);
3447 3448

	if (type == _MEM) {
3449
		thresholds = &memcg->thresholds;
3450
		usage = mem_cgroup_usage(memcg, false);
3451
	} else if (type == _MEMSWAP) {
3452
		thresholds = &memcg->memsw_thresholds;
3453
		usage = mem_cgroup_usage(memcg, true);
3454
	} else
3455 3456
		BUG();

3457 3458 3459
	if (!thresholds->primary)
		goto unlock;

3460 3461 3462 3463
	/* Check if a threshold crossed before removing */
	__mem_cgroup_threshold(memcg, type == _MEMSWAP);

	/* Calculate new number of threshold */
3464 3465 3466
	size = 0;
	for (i = 0; i < thresholds->primary->size; i++) {
		if (thresholds->primary->entries[i].eventfd != eventfd)
3467 3468 3469
			size++;
	}

3470
	new = thresholds->spare;
3471

3472 3473
	/* Set thresholds array to NULL if we don't have thresholds */
	if (!size) {
3474 3475
		kfree(new);
		new = NULL;
3476
		goto swap_buffers;
3477 3478
	}

3479
	new->size = size;
3480 3481

	/* Copy thresholds and find current threshold */
3482 3483 3484
	new->current_threshold = -1;
	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
		if (thresholds->primary->entries[i].eventfd == eventfd)
3485 3486
			continue;

3487
		new->entries[j] = thresholds->primary->entries[i];
3488
		if (new->entries[j].threshold <= usage) {
3489
			/*
3490
			 * new->current_threshold will not be used
3491 3492 3493
			 * until rcu_assign_pointer(), so it's safe to increment
			 * it here.
			 */
3494
			++new->current_threshold;
3495 3496 3497 3498
		}
		j++;
	}

3499
swap_buffers:
3500 3501
	/* Swap primary and spare array */
	thresholds->spare = thresholds->primary;
3502

3503
	rcu_assign_pointer(thresholds->primary, new);
3504

3505
	/* To be sure that nobody uses thresholds */
3506
	synchronize_rcu();
3507 3508 3509 3510 3511 3512

	/* If all events are unregistered, free the spare array */
	if (!new) {
		kfree(thresholds->spare);
		thresholds->spare = NULL;
	}
3513
unlock:
3514 3515
	mutex_unlock(&memcg->thresholds_lock);
}
3516

3517
static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
T
Tejun Heo 已提交
3518 3519
	struct eventfd_ctx *eventfd)
{
3520
	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
T
Tejun Heo 已提交
3521 3522
}

3523
static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
T
Tejun Heo 已提交
3524 3525
	struct eventfd_ctx *eventfd)
{
3526
	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
T
Tejun Heo 已提交
3527 3528
}

3529
static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
T
Tejun Heo 已提交
3530
	struct eventfd_ctx *eventfd, const char *args)
K
KAMEZAWA Hiroyuki 已提交
3531 3532 3533 3534 3535 3536 3537
{
	struct mem_cgroup_eventfd_list *event;

	event = kmalloc(sizeof(*event),	GFP_KERNEL);
	if (!event)
		return -ENOMEM;

3538
	spin_lock(&memcg_oom_lock);
K
KAMEZAWA Hiroyuki 已提交
3539 3540 3541 3542 3543

	event->eventfd = eventfd;
	list_add(&event->list, &memcg->oom_notify);

	/* already in OOM ? */
3544
	if (memcg->under_oom)
K
KAMEZAWA Hiroyuki 已提交
3545
		eventfd_signal(eventfd, 1);
3546
	spin_unlock(&memcg_oom_lock);
K
KAMEZAWA Hiroyuki 已提交
3547 3548 3549 3550

	return 0;
}

3551
static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
T
Tejun Heo 已提交
3552
	struct eventfd_ctx *eventfd)
K
KAMEZAWA Hiroyuki 已提交
3553 3554 3555
{
	struct mem_cgroup_eventfd_list *ev, *tmp;

3556
	spin_lock(&memcg_oom_lock);
K
KAMEZAWA Hiroyuki 已提交
3557

3558
	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
K
KAMEZAWA Hiroyuki 已提交
3559 3560 3561 3562 3563 3564
		if (ev->eventfd == eventfd) {
			list_del(&ev->list);
			kfree(ev);
		}
	}

3565
	spin_unlock(&memcg_oom_lock);
K
KAMEZAWA Hiroyuki 已提交
3566 3567
}

3568
static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
3569
{
3570
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
3571

3572
	seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
3573
	seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
3574 3575 3576
	return 0;
}

3577
static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
3578 3579
	struct cftype *cft, u64 val)
{
3580
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3581 3582

	/* cannot set to root cgroup and only 0 and 1 are allowed */
3583
	if (!css->parent || !((val == 0) || (val == 1)))
3584 3585
		return -EINVAL;

3586
	memcg->oom_kill_disable = val;
3587
	if (!val)
3588
		memcg_oom_recover(memcg);
3589

3590 3591 3592
	return 0;
}

3593 3594 3595 3596 3597 3598 3599
#ifdef CONFIG_CGROUP_WRITEBACK

struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg)
{
	return &memcg->cgwb_list;
}

static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
{
	return wb_domain_init(&memcg->cgwb_domain, gfp);
}

static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
{
	wb_domain_exit(&memcg->cgwb_domain);
}

3610 3611 3612 3613 3614
static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
{
	wb_domain_size_changed(&memcg->cgwb_domain);
}

T
Tejun Heo 已提交
3615 3616 3617 3618 3619 3620 3621 3622 3623 3624
struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);

	if (!memcg->css.parent)
		return NULL;

	return &memcg->cgwb_domain;
}

3625 3626 3627
/**
 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
 * @wb: bdi_writeback in question
3628 3629
 * @pfilepages: out parameter for number of file pages
 * @pheadroom: out parameter for number of allocatable pages according to memcg
3630 3631 3632
 * @pdirty: out parameter for number of dirty pages
 * @pwriteback: out parameter for number of pages under writeback
 *
3633 3634 3635
 * Determine the numbers of file, headroom, dirty, and writeback pages in
 * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
 * is a bit more involved.
3636
 *
3637 3638 3639 3640 3641
 * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
 * headroom is calculated as the lowest headroom of itself and the
 * ancestors.  Note that this doesn't consider the actual amount of
 * available memory in the system.  The caller should further cap
 * *@pheadroom accordingly.
3642
 */
3643 3644 3645
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback)
3646 3647 3648 3649 3650 3651 3652 3653
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
	struct mem_cgroup *parent;

	*pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY);

	/* this should eventually include NR_UNSTABLE_NFS */
	*pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
3654 3655 3656
	*pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
						     (1 << LRU_ACTIVE_FILE));
	*pheadroom = PAGE_COUNTER_MAX;
3657 3658 3659 3660 3661

	while ((parent = parent_mem_cgroup(memcg))) {
		unsigned long ceiling = min(memcg->memory.limit, memcg->high);
		unsigned long used = page_counter_read(&memcg->memory);

3662
		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
3663 3664 3665 3666
		memcg = parent;
	}
}

T
Tejun Heo 已提交
3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677
#else	/* CONFIG_CGROUP_WRITEBACK */

static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
{
	return 0;
}

static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
{
}

3678 3679 3680 3681
static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
{
}

3682 3683
#endif	/* CONFIG_CGROUP_WRITEBACK */

3684 3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695 3696
/*
 * DO NOT USE IN NEW FILES.
 *
 * "cgroup.event_control" implementation.
 *
 * This is way over-engineered.  It tries to support fully configurable
 * events for each user.  Such level of flexibility is completely
 * unnecessary especially in the light of the planned unified hierarchy.
 *
 * Please deprecate this and replace with something simpler if at all
 * possible.
 */

3697 3698 3699 3700 3701
/*
 * Unregister event and free resources.
 *
 * Gets called from workqueue.
 */
3702
static void memcg_event_remove(struct work_struct *work)
3703
{
3704 3705
	struct mem_cgroup_event *event =
		container_of(work, struct mem_cgroup_event, remove);
3706
	struct mem_cgroup *memcg = event->memcg;
3707 3708 3709

	remove_wait_queue(event->wqh, &event->wait);

3710
	event->unregister_event(memcg, event->eventfd);
3711 3712 3713 3714 3715 3716

	/* Notify userspace the event is going away. */
	eventfd_signal(event->eventfd, 1);

	eventfd_ctx_put(event->eventfd);
	kfree(event);
3717
	css_put(&memcg->css);
3718 3719 3720 3721 3722 3723 3724
}

/*
 * Gets called on POLLHUP on eventfd when user closes it.
 *
 * Called with wqh->lock held and interrupts disabled.
 */
3725 3726
static int memcg_event_wake(wait_queue_t *wait, unsigned mode,
			    int sync, void *key)
3727
{
3728 3729
	struct mem_cgroup_event *event =
		container_of(wait, struct mem_cgroup_event, wait);
3730
	struct mem_cgroup *memcg = event->memcg;
3731 3732 3733 3734 3735 3736 3737 3738 3739 3740 3741 3742
	unsigned long flags = (unsigned long)key;

	if (flags & POLLHUP) {
		/*
		 * If the event has been detached at cgroup removal, we
		 * can simply return knowing the other side will cleanup
		 * for us.
		 *
		 * We can't race against event freeing since the other
		 * side will require wqh->lock via remove_wait_queue(),
		 * which we hold.
		 */
3743
		spin_lock(&memcg->event_list_lock);
3744 3745 3746 3747 3748 3749 3750 3751
		if (!list_empty(&event->list)) {
			list_del_init(&event->list);
			/*
			 * We are in atomic context, but cgroup_event_remove()
			 * may sleep, so we have to call it in workqueue.
			 */
			schedule_work(&event->remove);
		}
3752
		spin_unlock(&memcg->event_list_lock);
3753 3754 3755 3756 3757
	}

	return 0;
}

3758
static void memcg_event_ptable_queue_proc(struct file *file,
3759 3760
		wait_queue_head_t *wqh, poll_table *pt)
{
3761 3762
	struct mem_cgroup_event *event =
		container_of(pt, struct mem_cgroup_event, pt);
3763 3764 3765 3766 3767 3768

	event->wqh = wqh;
	add_wait_queue(wqh, &event->wait);
}

/*
3769 3770
 * DO NOT USE IN NEW FILES.
 *
3771 3772 3773 3774 3775
 * Parse input and register new cgroup event handler.
 *
 * Input must be in format '<event_fd> <control_fd> <args>'.
 * Interpretation of args is defined by control file implementation.
 */
3776 3777
static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
					 char *buf, size_t nbytes, loff_t off)
3778
{
3779
	struct cgroup_subsys_state *css = of_css(of);
3780
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3781
	struct mem_cgroup_event *event;
3782 3783 3784 3785
	struct cgroup_subsys_state *cfile_css;
	unsigned int efd, cfd;
	struct fd efile;
	struct fd cfile;
3786
	const char *name;
3787 3788 3789
	char *endp;
	int ret;

3790 3791 3792
	buf = strstrip(buf);

	efd = simple_strtoul(buf, &endp, 10);
3793 3794
	if (*endp != ' ')
		return -EINVAL;
3795
	buf = endp + 1;
3796

3797
	cfd = simple_strtoul(buf, &endp, 10);
3798 3799
	if ((*endp != ' ') && (*endp != '\0'))
		return -EINVAL;
3800
	buf = endp + 1;
3801 3802 3803 3804 3805

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return -ENOMEM;

3806
	event->memcg = memcg;
3807
	INIT_LIST_HEAD(&event->list);
3808 3809 3810
	init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
	init_waitqueue_func_entry(&event->wait, memcg_event_wake);
	INIT_WORK(&event->remove, memcg_event_remove);
3811 3812 3813 3814 3815 3816 3817 3818 3819 3820 3821 3822 3823 3824 3825 3826 3827 3828 3829 3830 3831 3832 3833 3834 3835

	efile = fdget(efd);
	if (!efile.file) {
		ret = -EBADF;
		goto out_kfree;
	}

	event->eventfd = eventfd_ctx_fileget(efile.file);
	if (IS_ERR(event->eventfd)) {
		ret = PTR_ERR(event->eventfd);
		goto out_put_efile;
	}

	cfile = fdget(cfd);
	if (!cfile.file) {
		ret = -EBADF;
		goto out_put_eventfd;
	}

	/* the process need read permission on control file */
	/* AV: shouldn't we check that it's been opened for read instead? */
	ret = inode_permission(file_inode(cfile.file), MAY_READ);
	if (ret < 0)
		goto out_put_cfile;

3836 3837 3838 3839 3840
	/*
	 * Determine the event callbacks and set them in @event.  This used
	 * to be done via struct cftype but cgroup core no longer knows
	 * about these events.  The following is crude but the whole thing
	 * is for compatibility anyway.
3841 3842
	 *
	 * DO NOT ADD NEW FILES.
3843
	 */
A
Al Viro 已提交
3844
	name = cfile.file->f_path.dentry->d_name.name;
3845 3846 3847 3848 3849 3850 3851 3852 3853 3854 3855

	if (!strcmp(name, "memory.usage_in_bytes")) {
		event->register_event = mem_cgroup_usage_register_event;
		event->unregister_event = mem_cgroup_usage_unregister_event;
	} else if (!strcmp(name, "memory.oom_control")) {
		event->register_event = mem_cgroup_oom_register_event;
		event->unregister_event = mem_cgroup_oom_unregister_event;
	} else if (!strcmp(name, "memory.pressure_level")) {
		event->register_event = vmpressure_register_event;
		event->unregister_event = vmpressure_unregister_event;
	} else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
T
Tejun Heo 已提交
3856 3857
		event->register_event = memsw_cgroup_usage_register_event;
		event->unregister_event = memsw_cgroup_usage_unregister_event;
3858 3859 3860 3861 3862
	} else {
		ret = -EINVAL;
		goto out_put_cfile;
	}

3863
	/*
3864 3865 3866
	 * Verify @cfile should belong to @css.  Also, remaining events are
	 * automatically removed on cgroup destruction but the removal is
	 * asynchronous, so take an extra ref on @css.
3867
	 */
A
Al Viro 已提交
3868
	cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
3869
					       &memory_cgrp_subsys);
3870
	ret = -EINVAL;
3871
	if (IS_ERR(cfile_css))
3872
		goto out_put_cfile;
3873 3874
	if (cfile_css != css) {
		css_put(cfile_css);
3875
		goto out_put_cfile;
3876
	}
3877

3878
	ret = event->register_event(memcg, event->eventfd, buf);
3879 3880 3881 3882 3883
	if (ret)
		goto out_put_css;

	efile.file->f_op->poll(efile.file, &event->pt);

3884 3885 3886
	spin_lock(&memcg->event_list_lock);
	list_add(&event->list, &memcg->event_list);
	spin_unlock(&memcg->event_list_lock);
3887 3888 3889 3890

	fdput(cfile);
	fdput(efile);

3891
	return nbytes;
3892 3893

out_put_css:
3894
	css_put(css);
3895 3896 3897 3898 3899 3900 3901 3902 3903 3904 3905 3906
out_put_cfile:
	fdput(cfile);
out_put_eventfd:
	eventfd_ctx_put(event->eventfd);
out_put_efile:
	fdput(efile);
out_kfree:
	kfree(event);

	return ret;
}

3907
static struct cftype mem_cgroup_legacy_files[] = {
B
Balbir Singh 已提交
3908
	{
3909
		.name = "usage_in_bytes",
3910
		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
3911
		.read_u64 = mem_cgroup_read_u64,
B
Balbir Singh 已提交
3912
	},
3913 3914
	{
		.name = "max_usage_in_bytes",
3915
		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
3916
		.write = mem_cgroup_reset,
3917
		.read_u64 = mem_cgroup_read_u64,
3918
	},
B
Balbir Singh 已提交
3919
	{
3920
		.name = "limit_in_bytes",
3921
		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
3922
		.write = mem_cgroup_write,
3923
		.read_u64 = mem_cgroup_read_u64,
B
Balbir Singh 已提交
3924
	},
3925 3926 3927
	{
		.name = "soft_limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
3928
		.write = mem_cgroup_write,
3929
		.read_u64 = mem_cgroup_read_u64,
3930
	},
B
Balbir Singh 已提交
3931 3932
	{
		.name = "failcnt",
3933
		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
3934
		.write = mem_cgroup_reset,
3935
		.read_u64 = mem_cgroup_read_u64,
B
Balbir Singh 已提交
3936
	},
3937 3938
	{
		.name = "stat",
3939
		.seq_show = memcg_stat_show,
3940
	},
3941 3942
	{
		.name = "force_empty",
3943
		.write = mem_cgroup_force_empty_write,
3944
	},
3945 3946 3947 3948 3949
	{
		.name = "use_hierarchy",
		.write_u64 = mem_cgroup_hierarchy_write,
		.read_u64 = mem_cgroup_hierarchy_read,
	},
3950
	{
3951
		.name = "cgroup.event_control",		/* XXX: for compat */
3952
		.write = memcg_write_event_control,
3953
		.flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
3954
	},
K
KOSAKI Motohiro 已提交
3955 3956 3957 3958 3959
	{
		.name = "swappiness",
		.read_u64 = mem_cgroup_swappiness_read,
		.write_u64 = mem_cgroup_swappiness_write,
	},
3960 3961 3962 3963 3964
	{
		.name = "move_charge_at_immigrate",
		.read_u64 = mem_cgroup_move_charge_read,
		.write_u64 = mem_cgroup_move_charge_write,
	},
K
KAMEZAWA Hiroyuki 已提交
3965 3966
	{
		.name = "oom_control",
3967
		.seq_show = mem_cgroup_oom_control_read,
3968
		.write_u64 = mem_cgroup_oom_control_write,
K
KAMEZAWA Hiroyuki 已提交
3969 3970
		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
	},
3971 3972 3973
	{
		.name = "pressure_level",
	},
3974 3975 3976
#ifdef CONFIG_NUMA
	{
		.name = "numa_stat",
3977
		.seq_show = memcg_numa_stat_show,
3978 3979
	},
#endif
3980 3981 3982
	{
		.name = "kmem.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
3983
		.write = mem_cgroup_write,
3984
		.read_u64 = mem_cgroup_read_u64,
3985 3986 3987 3988
	},
	{
		.name = "kmem.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
3989
		.read_u64 = mem_cgroup_read_u64,
3990 3991 3992 3993
	},
	{
		.name = "kmem.failcnt",
		.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
3994
		.write = mem_cgroup_reset,
3995
		.read_u64 = mem_cgroup_read_u64,
3996 3997 3998 3999
	},
	{
		.name = "kmem.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
4000
		.write = mem_cgroup_reset,
4001
		.read_u64 = mem_cgroup_read_u64,
4002
	},
4003 4004 4005
#ifdef CONFIG_SLABINFO
	{
		.name = "kmem.slabinfo",
4006 4007 4008
		.seq_start = memcg_slab_start,
		.seq_next = memcg_slab_next,
		.seq_stop = memcg_slab_stop,
4009
		.seq_show = memcg_slab_show,
4010 4011
	},
#endif
V
Vladimir Davydov 已提交
4012 4013 4014 4015 4016 4017 4018 4019 4020 4021 4022 4023 4024 4025 4026 4027 4028 4029 4030 4031 4032 4033 4034
	{
		.name = "kmem.tcp.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "kmem.tcp.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "kmem.tcp.failcnt",
		.private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "kmem.tcp.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
4035
	{ },	/* terminate */
4036
};
4037

4038 4039 4040 4041 4042 4043 4044 4045 4046 4047 4048 4049 4050 4051 4052 4053 4054 4055 4056 4057 4058 4059 4060 4061 4062 4063
/*
 * Private memory cgroup IDR
 *
 * Swap-out records and page cache shadow entries need to store memcg
 * references in constrained space, so we maintain an ID space that is
 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
 * memory-controlled cgroups to 64k.
 *
 * However, there usually are many references to the oflline CSS after
 * the cgroup has been destroyed, such as page cache or reclaimable
 * slab objects, that don't need to hang on to the ID. We want to keep
 * those dead CSS from occupying IDs, or we might quickly exhaust the
 * relatively small ID space and prevent the creation of new cgroups
 * even when there are much fewer than 64k cgroups - possibly none.
 *
 * Maintain a private 16-bit ID space for memcg, and allow the ID to
 * be freed and recycled when it's no longer needed, which is usually
 * when the CSS is offlined.
 *
 * The only exception to that are records of swapped out tmpfs/shmem
 * pages that need to be attributed to live ancestors on swapin. But
 * those references are manageable from userspace.
 */

static DEFINE_IDR(mem_cgroup_idr);

4064
static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
4065
{
4066
	VM_BUG_ON(atomic_read(&memcg->id.ref) <= 0);
4067
	atomic_add(n, &memcg->id.ref);
4068 4069
}

4070
static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
4071
{
4072
	VM_BUG_ON(atomic_read(&memcg->id.ref) < n);
4073
	if (atomic_sub_and_test(n, &memcg->id.ref)) {
4074 4075 4076 4077 4078 4079 4080 4081
		idr_remove(&mem_cgroup_idr, memcg->id.id);
		memcg->id.id = 0;

		/* Memcg ID pins CSS */
		css_put(&memcg->css);
	}
}

4082 4083 4084 4085 4086 4087 4088 4089 4090 4091
static inline void mem_cgroup_id_get(struct mem_cgroup *memcg)
{
	mem_cgroup_id_get_many(memcg, 1);
}

static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
{
	mem_cgroup_id_put_many(memcg, 1);
}

4092 4093 4094 4095 4096 4097 4098 4099 4100 4101 4102 4103
/**
 * mem_cgroup_from_id - look up a memcg from a memcg id
 * @id: the memcg id to look up
 *
 * Caller must hold rcu_read_lock().
 */
struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return idr_find(&mem_cgroup_idr, id);
}

4104
static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
4105 4106
{
	struct mem_cgroup_per_node *pn;
4107
	int tmp = node;
4108 4109 4110 4111 4112 4113 4114 4115
	/*
	 * This routine is called against possible nodes.
	 * But it's BUG to call kmalloc() against offline node.
	 *
	 * TODO: this routine can waste much memory for nodes which will
	 *       never be onlined. It's better to use memory hotplug callback
	 *       function.
	 */
4116 4117
	if (!node_state(node, N_NORMAL_MEMORY))
		tmp = -1;
4118
	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
4119 4120
	if (!pn)
		return 1;
4121

4122 4123 4124 4125 4126
	lruvec_init(&pn->lruvec);
	pn->usage_in_excess = 0;
	pn->on_tree = false;
	pn->memcg = memcg;

4127
	memcg->nodeinfo[node] = pn;
4128 4129 4130
	return 0;
}

4131
static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
4132
{
4133
	kfree(memcg->nodeinfo[node]);
4134 4135
}

4136
static void mem_cgroup_free(struct mem_cgroup *memcg)
4137
{
4138
	int node;
4139

4140
	memcg_wb_domain_exit(memcg);
4141
	for_each_node(node)
4142
		free_mem_cgroup_per_node_info(memcg, node);
4143
	free_percpu(memcg->stat);
4144
	kfree(memcg);
4145
}
4146

4147
static struct mem_cgroup *mem_cgroup_alloc(void)
B
Balbir Singh 已提交
4148
{
4149
	struct mem_cgroup *memcg;
4150
	size_t size;
4151
	int node;
B
Balbir Singh 已提交
4152

4153 4154 4155 4156
	size = sizeof(struct mem_cgroup);
	size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);

	memcg = kzalloc(size, GFP_KERNEL);
4157
	if (!memcg)
4158 4159
		return NULL;

4160 4161 4162 4163 4164 4165
	memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
				 1, MEM_CGROUP_ID_MAX,
				 GFP_KERNEL);
	if (memcg->id.id < 0)
		goto fail;

4166 4167 4168
	memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
	if (!memcg->stat)
		goto fail;
4169

B
Bob Liu 已提交
4170
	for_each_node(node)
4171
		if (alloc_mem_cgroup_per_node_info(memcg, node))
4172
			goto fail;
4173

4174 4175
	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
		goto fail;
4176

4177
	INIT_WORK(&memcg->high_work, high_work_func);
4178 4179 4180 4181
	memcg->last_scanned_node = MAX_NUMNODES;
	INIT_LIST_HEAD(&memcg->oom_notify);
	mutex_init(&memcg->thresholds_lock);
	spin_lock_init(&memcg->move_lock);
4182
	vmpressure_init(&memcg->vmpressure);
4183 4184
	INIT_LIST_HEAD(&memcg->event_list);
	spin_lock_init(&memcg->event_list_lock);
4185
	memcg->socket_pressure = jiffies;
4186
#ifndef CONFIG_SLOB
V
Vladimir Davydov 已提交
4187 4188
	memcg->kmemcg_id = -1;
#endif
4189 4190 4191
#ifdef CONFIG_CGROUP_WRITEBACK
	INIT_LIST_HEAD(&memcg->cgwb_list);
#endif
4192
	idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
4193 4194
	return memcg;
fail:
4195 4196
	if (memcg->id.id > 0)
		idr_remove(&mem_cgroup_idr, memcg->id.id);
4197 4198
	mem_cgroup_free(memcg);
	return NULL;
4199 4200
}

4201 4202
static struct cgroup_subsys_state * __ref
mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
4203
{
4204 4205 4206
	struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
	struct mem_cgroup *memcg;
	long error = -ENOMEM;
4207

4208 4209 4210
	memcg = mem_cgroup_alloc();
	if (!memcg)
		return ERR_PTR(error);
4211

4212 4213 4214 4215 4216 4217 4218 4219
	memcg->high = PAGE_COUNTER_MAX;
	memcg->soft_limit = PAGE_COUNTER_MAX;
	if (parent) {
		memcg->swappiness = mem_cgroup_swappiness(parent);
		memcg->oom_kill_disable = parent->oom_kill_disable;
	}
	if (parent && parent->use_hierarchy) {
		memcg->use_hierarchy = true;
4220
		page_counter_init(&memcg->memory, &parent->memory);
4221
		page_counter_init(&memcg->swap, &parent->swap);
4222 4223
		page_counter_init(&memcg->memsw, &parent->memsw);
		page_counter_init(&memcg->kmem, &parent->kmem);
4224
		page_counter_init(&memcg->tcpmem, &parent->tcpmem);
4225
	} else {
4226
		page_counter_init(&memcg->memory, NULL);
4227
		page_counter_init(&memcg->swap, NULL);
4228 4229
		page_counter_init(&memcg->memsw, NULL);
		page_counter_init(&memcg->kmem, NULL);
4230
		page_counter_init(&memcg->tcpmem, NULL);
4231 4232 4233 4234 4235
		/*
		 * Deeper hierachy with use_hierarchy == false doesn't make
		 * much sense so let cgroup subsystem know about this
		 * unfortunate state in our controller.
		 */
4236
		if (parent != root_mem_cgroup)
4237
			memory_cgrp_subsys.broken_hierarchy = true;
4238
	}
4239

4240 4241 4242 4243 4244 4245
	/* The following stuff does not apply to the root */
	if (!parent) {
		root_mem_cgroup = memcg;
		return &memcg->css;
	}

4246
	error = memcg_online_kmem(memcg);
4247 4248
	if (error)
		goto fail;
4249

4250
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
4251
		static_branch_inc(&memcg_sockets_enabled_key);
4252

4253 4254 4255
	return &memcg->css;
fail:
	mem_cgroup_free(memcg);
4256
	return ERR_PTR(-ENOMEM);
4257 4258
}

4259
static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
4260
{
4261 4262
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

4263
	/* Online state pins memcg ID, memcg ID pins CSS */
4264
	atomic_set(&memcg->id.ref, 1);
4265
	css_get(css);
4266
	return 0;
B
Balbir Singh 已提交
4267 4268
}

4269
static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
4270
{
4271
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4272
	struct mem_cgroup_event *event, *tmp;
4273 4274 4275 4276 4277 4278

	/*
	 * Unregister events and notify userspace.
	 * Notify userspace about cgroup removing only after rmdir of cgroup
	 * directory to avoid race between userspace and kernelspace.
	 */
4279 4280
	spin_lock(&memcg->event_list_lock);
	list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
4281 4282 4283
		list_del_init(&event->list);
		schedule_work(&event->remove);
	}
4284
	spin_unlock(&memcg->event_list_lock);
4285

4286
	memcg_offline_kmem(memcg);
4287
	wb_memcg_offline(memcg);
4288 4289

	mem_cgroup_id_put(memcg);
4290 4291
}

4292 4293 4294 4295 4296 4297 4298
static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	invalidate_reclaim_iterators(memcg);
}

4299
static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
B
Balbir Singh 已提交
4300
{
4301
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4302

4303
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
4304
		static_branch_dec(&memcg_sockets_enabled_key);
4305

4306
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
V
Vladimir Davydov 已提交
4307
		static_branch_dec(&memcg_sockets_enabled_key);
4308

4309 4310 4311
	vmpressure_cleanup(&memcg->vmpressure);
	cancel_work_sync(&memcg->high_work);
	mem_cgroup_remove_from_trees(memcg);
4312
	memcg_free_kmem(memcg);
4313
	mem_cgroup_free(memcg);
B
Balbir Singh 已提交
4314 4315
}

4316 4317 4318 4319 4320 4321 4322 4323 4324 4325 4326 4327 4328 4329 4330 4331 4332
/**
 * mem_cgroup_css_reset - reset the states of a mem_cgroup
 * @css: the target css
 *
 * Reset the states of the mem_cgroup associated with @css.  This is
 * invoked when the userland requests disabling on the default hierarchy
 * but the memcg is pinned through dependency.  The memcg should stop
 * applying policies and should revert to the vanilla state as it may be
 * made visible again.
 *
 * The current implementation only resets the essential configurations.
 * This needs to be expanded to cover all the visible parts.
 */
static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

4333 4334 4335 4336 4337
	page_counter_limit(&memcg->memory, PAGE_COUNTER_MAX);
	page_counter_limit(&memcg->swap, PAGE_COUNTER_MAX);
	page_counter_limit(&memcg->memsw, PAGE_COUNTER_MAX);
	page_counter_limit(&memcg->kmem, PAGE_COUNTER_MAX);
	page_counter_limit(&memcg->tcpmem, PAGE_COUNTER_MAX);
4338 4339
	memcg->low = 0;
	memcg->high = PAGE_COUNTER_MAX;
4340
	memcg->soft_limit = PAGE_COUNTER_MAX;
4341
	memcg_wb_domain_size_changed(memcg);
4342 4343
}

4344
#ifdef CONFIG_MMU
4345
/* Handlers for move charge at task migration. */
4346
static int mem_cgroup_do_precharge(unsigned long count)
4347
{
4348
	int ret;
4349

4350 4351
	/* Try a single bulk charge without reclaim first, kswapd may wake */
	ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
4352
	if (!ret) {
4353 4354 4355
		mc.precharge += count;
		return ret;
	}
4356

4357
	/* Try charges one by one with reclaim, but do not retry */
4358
	while (count--) {
4359
		ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
4360 4361
		if (ret)
			return ret;
4362
		mc.precharge++;
4363
		cond_resched();
4364
	}
4365
	return 0;
4366 4367 4368 4369
}

union mc_target {
	struct page	*page;
4370
	swp_entry_t	ent;
4371 4372 4373
};

enum mc_target_type {
4374
	MC_TARGET_NONE = 0,
4375
	MC_TARGET_PAGE,
4376
	MC_TARGET_SWAP,
4377 4378
};

D
Daisuke Nishimura 已提交
4379 4380
static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
						unsigned long addr, pte_t ptent)
4381
{
D
Daisuke Nishimura 已提交
4382
	struct page *page = vm_normal_page(vma, addr, ptent);
4383

D
Daisuke Nishimura 已提交
4384 4385 4386
	if (!page || !page_mapped(page))
		return NULL;
	if (PageAnon(page)) {
4387
		if (!(mc.flags & MOVE_ANON))
D
Daisuke Nishimura 已提交
4388
			return NULL;
4389 4390 4391 4392
	} else {
		if (!(mc.flags & MOVE_FILE))
			return NULL;
	}
D
Daisuke Nishimura 已提交
4393 4394 4395 4396 4397 4398
	if (!get_page_unless_zero(page))
		return NULL;

	return page;
}

4399
#ifdef CONFIG_SWAP
D
Daisuke Nishimura 已提交
4400
static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4401
			pte_t ptent, swp_entry_t *entry)
D
Daisuke Nishimura 已提交
4402 4403 4404 4405
{
	struct page *page = NULL;
	swp_entry_t ent = pte_to_swp_entry(ptent);

4406
	if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
D
Daisuke Nishimura 已提交
4407
		return NULL;
4408 4409 4410 4411
	/*
	 * Because lookup_swap_cache() updates some statistics counter,
	 * we call find_get_page() with swapper_space directly.
	 */
4412
	page = find_get_page(swap_address_space(ent), swp_offset(ent));
4413
	if (do_memsw_account())
D
Daisuke Nishimura 已提交
4414 4415 4416 4417
		entry->val = ent.val;

	return page;
}
4418 4419
#else
static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4420
			pte_t ptent, swp_entry_t *entry)
4421 4422 4423 4424
{
	return NULL;
}
#endif
D
Daisuke Nishimura 已提交
4425

4426 4427 4428 4429 4430 4431 4432 4433 4434
static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	struct page *page = NULL;
	struct address_space *mapping;
	pgoff_t pgoff;

	if (!vma->vm_file) /* anonymous vma */
		return NULL;
4435
	if (!(mc.flags & MOVE_FILE))
4436 4437 4438
		return NULL;

	mapping = vma->vm_file->f_mapping;
4439
	pgoff = linear_page_index(vma, addr);
4440 4441

	/* page is moved even if it's not RSS of this task(page-faulted). */
4442 4443
#ifdef CONFIG_SWAP
	/* shmem/tmpfs may report page out on swap: account for that too. */
4444 4445 4446 4447
	if (shmem_mapping(mapping)) {
		page = find_get_entry(mapping, pgoff);
		if (radix_tree_exceptional_entry(page)) {
			swp_entry_t swp = radix_to_swp_entry(page);
4448
			if (do_memsw_account())
4449
				*entry = swp;
4450 4451
			page = find_get_page(swap_address_space(swp),
					     swp_offset(swp));
4452 4453 4454 4455 4456
		}
	} else
		page = find_get_page(mapping, pgoff);
#else
	page = find_get_page(mapping, pgoff);
4457
#endif
4458 4459 4460
	return page;
}

4461 4462 4463
/**
 * mem_cgroup_move_account - move account of the page
 * @page: the page
4464
 * @compound: charge the page as compound or small page
4465 4466 4467
 * @from: mem_cgroup which the page is moved from.
 * @to:	mem_cgroup which the page is moved to. @from != @to.
 *
4468
 * The caller must make sure the page is not on LRU (isolate_page() is useful.)
4469 4470 4471 4472 4473
 *
 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
 * from old cgroup.
 */
static int mem_cgroup_move_account(struct page *page,
4474
				   bool compound,
4475 4476 4477 4478
				   struct mem_cgroup *from,
				   struct mem_cgroup *to)
{
	unsigned long flags;
4479
	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
4480
	int ret;
4481
	bool anon;
4482 4483 4484

	VM_BUG_ON(from == to);
	VM_BUG_ON_PAGE(PageLRU(page), page);
4485
	VM_BUG_ON(compound && !PageTransHuge(page));
4486 4487

	/*
4488
	 * Prevent mem_cgroup_migrate() from looking at
4489
	 * page->mem_cgroup of its source page while we change it.
4490
	 */
4491
	ret = -EBUSY;
4492 4493 4494 4495 4496 4497 4498
	if (!trylock_page(page))
		goto out;

	ret = -EINVAL;
	if (page->mem_cgroup != from)
		goto out_unlock;

4499 4500
	anon = PageAnon(page);

4501 4502
	spin_lock_irqsave(&from->move_lock, flags);

4503
	if (!anon && page_mapped(page)) {
4504 4505 4506 4507 4508 4509
		__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
			       nr_pages);
		__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
			       nr_pages);
	}

4510 4511 4512 4513 4514 4515 4516 4517 4518 4519 4520 4521 4522 4523 4524 4525
	/*
	 * move_lock grabbed above and caller set from->moving_account, so
	 * mem_cgroup_update_page_stat() will serialize updates to PageDirty.
	 * So mapping should be stable for dirty pages.
	 */
	if (!anon && PageDirty(page)) {
		struct address_space *mapping = page_mapping(page);

		if (mapping_cap_account_dirty(mapping)) {
			__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_DIRTY],
				       nr_pages);
			__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_DIRTY],
				       nr_pages);
		}
	}

4526 4527 4528 4529 4530 4531 4532 4533 4534 4535 4536 4537 4538 4539 4540 4541 4542 4543 4544 4545
	if (PageWriteback(page)) {
		__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
			       nr_pages);
		__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
			       nr_pages);
	}

	/*
	 * It is safe to change page->mem_cgroup here because the page
	 * is referenced, charged, and isolated - we can't race with
	 * uncharging, charging, migration, or LRU putback.
	 */

	/* caller should have done css_get */
	page->mem_cgroup = to;
	spin_unlock_irqrestore(&from->move_lock, flags);

	ret = 0;

	local_irq_disable();
4546
	mem_cgroup_charge_statistics(to, page, compound, nr_pages);
4547
	memcg_check_events(to, page);
4548
	mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
4549 4550 4551 4552 4553 4554 4555 4556
	memcg_check_events(from, page);
	local_irq_enable();
out_unlock:
	unlock_page(page);
out:
	return ret;
}

4557 4558 4559 4560 4561 4562 4563 4564 4565 4566 4567 4568 4569 4570 4571 4572 4573 4574 4575
/**
 * get_mctgt_type - get target type of moving charge
 * @vma: the vma the pte to be checked belongs
 * @addr: the address corresponding to the pte to be checked
 * @ptent: the pte to be checked
 * @target: the pointer the target page or swap ent will be stored(can be NULL)
 *
 * Returns
 *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
 *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
 *     move charge. if @target is not NULL, the page is stored in target->page
 *     with extra refcnt got(Callers should handle it).
 *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
 *     target for charge migration. if @target is not NULL, the entry is stored
 *     in target->ent.
 *
 * Called with pte lock held.
 */

4576
static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
D
Daisuke Nishimura 已提交
4577 4578 4579
		unsigned long addr, pte_t ptent, union mc_target *target)
{
	struct page *page = NULL;
4580
	enum mc_target_type ret = MC_TARGET_NONE;
D
Daisuke Nishimura 已提交
4581 4582 4583 4584 4585
	swp_entry_t ent = { .val = 0 };

	if (pte_present(ptent))
		page = mc_handle_present_pte(vma, addr, ptent);
	else if (is_swap_pte(ptent))
4586
		page = mc_handle_swap_pte(vma, ptent, &ent);
4587
	else if (pte_none(ptent))
4588
		page = mc_handle_file_pte(vma, addr, ptent, &ent);
D
Daisuke Nishimura 已提交
4589 4590

	if (!page && !ent.val)
4591
		return ret;
4592 4593
	if (page) {
		/*
4594
		 * Do only loose check w/o serialization.
4595
		 * mem_cgroup_move_account() checks the page is valid or
4596
		 * not under LRU exclusion.
4597
		 */
4598
		if (page->mem_cgroup == mc.from) {
4599 4600 4601 4602 4603 4604 4605
			ret = MC_TARGET_PAGE;
			if (target)
				target->page = page;
		}
		if (!ret || !target)
			put_page(page);
	}
D
Daisuke Nishimura 已提交
4606 4607
	/* There is a swap entry and a page doesn't exist or isn't charged */
	if (ent.val && !ret &&
L
Li Zefan 已提交
4608
	    mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
4609 4610 4611
		ret = MC_TARGET_SWAP;
		if (target)
			target->ent = ent;
4612 4613 4614 4615
	}
	return ret;
}

4616 4617 4618 4619 4620 4621 4622 4623 4624 4625 4626 4627 4628
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * We don't consider swapping or file mapped pages because THP does not
 * support them for now.
 * Caller should make sure that pmd_trans_huge(pmd) is true.
 */
static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
		unsigned long addr, pmd_t pmd, union mc_target *target)
{
	struct page *page = NULL;
	enum mc_target_type ret = MC_TARGET_NONE;

	page = pmd_page(pmd);
4629
	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
4630
	if (!(mc.flags & MOVE_ANON))
4631
		return ret;
4632
	if (page->mem_cgroup == mc.from) {
4633 4634 4635 4636 4637 4638 4639 4640 4641 4642 4643 4644 4645 4646 4647 4648
		ret = MC_TARGET_PAGE;
		if (target) {
			get_page(page);
			target->page = page;
		}
	}
	return ret;
}
#else
static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
		unsigned long addr, pmd_t pmd, union mc_target *target)
{
	return MC_TARGET_NONE;
}
#endif

4649 4650 4651 4652
static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
					unsigned long addr, unsigned long end,
					struct mm_walk *walk)
{
4653
	struct vm_area_struct *vma = walk->vma;
4654 4655 4656
	pte_t *pte;
	spinlock_t *ptl;

4657 4658
	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
4659 4660
		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
			mc.precharge += HPAGE_PMD_NR;
4661
		spin_unlock(ptl);
4662
		return 0;
4663
	}
4664

4665 4666
	if (pmd_trans_unstable(pmd))
		return 0;
4667 4668
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
4669
		if (get_mctgt_type(vma, addr, *pte, NULL))
4670 4671 4672 4673
			mc.precharge++;	/* increment precharge temporarily */
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

4674 4675 4676
	return 0;
}

4677 4678 4679 4680
static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
{
	unsigned long precharge;

4681 4682 4683 4684
	struct mm_walk mem_cgroup_count_precharge_walk = {
		.pmd_entry = mem_cgroup_count_precharge_pte_range,
		.mm = mm,
	};
4685
	down_read(&mm->mmap_sem);
4686 4687
	walk_page_range(0, mm->highest_vm_end,
			&mem_cgroup_count_precharge_walk);
4688
	up_read(&mm->mmap_sem);
4689 4690 4691 4692 4693 4694 4695 4696 4697

	precharge = mc.precharge;
	mc.precharge = 0;

	return precharge;
}

static int mem_cgroup_precharge_mc(struct mm_struct *mm)
{
4698 4699 4700 4701 4702
	unsigned long precharge = mem_cgroup_count_precharge(mm);

	VM_BUG_ON(mc.moving_task);
	mc.moving_task = current;
	return mem_cgroup_do_precharge(precharge);
4703 4704
}

4705 4706
/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
static void __mem_cgroup_clear_mc(void)
4707
{
4708 4709 4710
	struct mem_cgroup *from = mc.from;
	struct mem_cgroup *to = mc.to;

4711
	/* we must uncharge all the leftover precharges from mc.to */
4712
	if (mc.precharge) {
4713
		cancel_charge(mc.to, mc.precharge);
4714 4715 4716 4717 4718 4719 4720
		mc.precharge = 0;
	}
	/*
	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
	 * we must uncharge here.
	 */
	if (mc.moved_charge) {
4721
		cancel_charge(mc.from, mc.moved_charge);
4722
		mc.moved_charge = 0;
4723
	}
4724 4725 4726
	/* we must fixup refcnts and charges */
	if (mc.moved_swap) {
		/* uncharge swap account from the old cgroup */
4727
		if (!mem_cgroup_is_root(mc.from))
4728
			page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
4729

4730 4731
		mem_cgroup_id_put_many(mc.from, mc.moved_swap);

4732
		/*
4733 4734
		 * we charged both to->memory and to->memsw, so we
		 * should uncharge to->memory.
4735
		 */
4736
		if (!mem_cgroup_is_root(mc.to))
4737 4738
			page_counter_uncharge(&mc.to->memory, mc.moved_swap);

4739 4740
		mem_cgroup_id_get_many(mc.to, mc.moved_swap);
		css_put_many(&mc.to->css, mc.moved_swap);
4741

4742 4743
		mc.moved_swap = 0;
	}
4744 4745 4746 4747 4748 4749 4750
	memcg_oom_recover(from);
	memcg_oom_recover(to);
	wake_up_all(&mc.waitq);
}

static void mem_cgroup_clear_mc(void)
{
4751 4752
	struct mm_struct *mm = mc.mm;

4753 4754 4755 4756 4757 4758
	/*
	 * we must clear moving_task before waking up waiters at the end of
	 * task migration.
	 */
	mc.moving_task = NULL;
	__mem_cgroup_clear_mc();
4759
	spin_lock(&mc.lock);
4760 4761
	mc.from = NULL;
	mc.to = NULL;
4762
	mc.mm = NULL;
4763
	spin_unlock(&mc.lock);
4764 4765

	mmput(mm);
4766 4767
}

4768
static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
4769
{
4770
	struct cgroup_subsys_state *css;
4771
	struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
4772
	struct mem_cgroup *from;
4773
	struct task_struct *leader, *p;
4774
	struct mm_struct *mm;
4775
	unsigned long move_flags;
4776
	int ret = 0;
4777

4778 4779
	/* charge immigration isn't supported on the default hierarchy */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
4780 4781
		return 0;

4782 4783 4784 4785 4786 4787 4788
	/*
	 * Multi-process migrations only happen on the default hierarchy
	 * where charge immigration is not used.  Perform charge
	 * immigration if @tset contains a leader and whine if there are
	 * multiple.
	 */
	p = NULL;
4789
	cgroup_taskset_for_each_leader(leader, css, tset) {
4790 4791
		WARN_ON_ONCE(p);
		p = leader;
4792
		memcg = mem_cgroup_from_css(css);
4793 4794 4795 4796
	}
	if (!p)
		return 0;

4797 4798 4799 4800 4801 4802 4803 4804 4805
	/*
	 * We are now commited to this value whatever it is. Changes in this
	 * tunable will only affect upcoming migrations, not the current one.
	 * So we need to save it, and keep it going.
	 */
	move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
	if (!move_flags)
		return 0;

4806 4807 4808 4809 4810 4811 4812 4813 4814 4815 4816 4817 4818 4819 4820 4821
	from = mem_cgroup_from_task(p);

	VM_BUG_ON(from == memcg);

	mm = get_task_mm(p);
	if (!mm)
		return 0;
	/* We move charges only when we move a owner of the mm */
	if (mm->owner == p) {
		VM_BUG_ON(mc.from);
		VM_BUG_ON(mc.to);
		VM_BUG_ON(mc.precharge);
		VM_BUG_ON(mc.moved_charge);
		VM_BUG_ON(mc.moved_swap);

		spin_lock(&mc.lock);
4822
		mc.mm = mm;
4823 4824 4825 4826 4827 4828 4829 4830 4831
		mc.from = from;
		mc.to = memcg;
		mc.flags = move_flags;
		spin_unlock(&mc.lock);
		/* We set mc.moving_task later */

		ret = mem_cgroup_precharge_mc(mm);
		if (ret)
			mem_cgroup_clear_mc();
4832 4833
	} else {
		mmput(mm);
4834 4835 4836 4837
	}
	return ret;
}

4838
static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
4839
{
4840 4841
	if (mc.to)
		mem_cgroup_clear_mc();
4842 4843
}

4844 4845 4846
static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mm_walk *walk)
4847
{
4848
	int ret = 0;
4849
	struct vm_area_struct *vma = walk->vma;
4850 4851
	pte_t *pte;
	spinlock_t *ptl;
4852 4853 4854
	enum mc_target_type target_type;
	union mc_target target;
	struct page *page;
4855

4856 4857
	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
4858
		if (mc.precharge < HPAGE_PMD_NR) {
4859
			spin_unlock(ptl);
4860 4861 4862 4863 4864 4865
			return 0;
		}
		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
		if (target_type == MC_TARGET_PAGE) {
			page = target.page;
			if (!isolate_lru_page(page)) {
4866
				if (!mem_cgroup_move_account(page, true,
4867
							     mc.from, mc.to)) {
4868 4869 4870 4871 4872 4873 4874
					mc.precharge -= HPAGE_PMD_NR;
					mc.moved_charge += HPAGE_PMD_NR;
				}
				putback_lru_page(page);
			}
			put_page(page);
		}
4875
		spin_unlock(ptl);
4876
		return 0;
4877 4878
	}

4879 4880
	if (pmd_trans_unstable(pmd))
		return 0;
4881 4882 4883 4884
retry:
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; addr += PAGE_SIZE) {
		pte_t ptent = *(pte++);
4885
		swp_entry_t ent;
4886 4887 4888 4889

		if (!mc.precharge)
			break;

4890
		switch (get_mctgt_type(vma, addr, ptent, &target)) {
4891 4892
		case MC_TARGET_PAGE:
			page = target.page;
4893 4894 4895 4896 4897 4898 4899 4900
			/*
			 * We can have a part of the split pmd here. Moving it
			 * can be done but it would be too convoluted so simply
			 * ignore such a partial THP and keep it in original
			 * memcg. There should be somebody mapping the head.
			 */
			if (PageTransCompound(page))
				goto put;
4901 4902
			if (isolate_lru_page(page))
				goto put;
4903 4904
			if (!mem_cgroup_move_account(page, false,
						mc.from, mc.to)) {
4905
				mc.precharge--;
4906 4907
				/* we uncharge from mc.from later. */
				mc.moved_charge++;
4908 4909
			}
			putback_lru_page(page);
4910
put:			/* get_mctgt_type() gets the page */
4911 4912
			put_page(page);
			break;
4913 4914
		case MC_TARGET_SWAP:
			ent = target.ent;
4915
			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
4916
				mc.precharge--;
4917 4918 4919
				/* we fixup refcnts and charges later. */
				mc.moved_swap++;
			}
4920
			break;
4921 4922 4923 4924 4925 4926 4927 4928 4929 4930 4931 4932 4933 4934
		default:
			break;
		}
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	if (addr != end) {
		/*
		 * We have consumed all precharges we got in can_attach().
		 * We try charge one by one, but don't do any additional
		 * charges to mc.to if we have failed in charge once in attach()
		 * phase.
		 */
4935
		ret = mem_cgroup_do_precharge(1);
4936 4937 4938 4939 4940 4941 4942
		if (!ret)
			goto retry;
	}

	return ret;
}

4943
static void mem_cgroup_move_charge(void)
4944
{
4945 4946
	struct mm_walk mem_cgroup_move_charge_walk = {
		.pmd_entry = mem_cgroup_move_charge_pte_range,
4947
		.mm = mc.mm,
4948
	};
4949 4950

	lru_add_drain_all();
4951
	/*
4952 4953 4954
	 * Signal lock_page_memcg() to take the memcg's move_lock
	 * while we're moving its pages to another memcg. Then wait
	 * for already started RCU-only updates to finish.
4955 4956 4957
	 */
	atomic_inc(&mc.from->moving_account);
	synchronize_rcu();
4958
retry:
4959
	if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
4960 4961 4962 4963 4964 4965 4966 4967 4968 4969 4970
		/*
		 * Someone who are holding the mmap_sem might be waiting in
		 * waitq. So we cancel all extra charges, wake up all waiters,
		 * and retry. Because we cancel precharges, we might not be able
		 * to move enough charges, but moving charge is a best-effort
		 * feature anyway, so it wouldn't be a big problem.
		 */
		__mem_cgroup_clear_mc();
		cond_resched();
		goto retry;
	}
4971 4972 4973 4974
	/*
	 * When we have consumed all precharges and failed in doing
	 * additional charge, the page walk just aborts.
	 */
4975 4976
	walk_page_range(0, mc.mm->highest_vm_end, &mem_cgroup_move_charge_walk);

4977
	up_read(&mc.mm->mmap_sem);
4978
	atomic_dec(&mc.from->moving_account);
4979 4980
}

4981
static void mem_cgroup_move_task(void)
B
Balbir Singh 已提交
4982
{
4983 4984
	if (mc.to) {
		mem_cgroup_move_charge();
4985
		mem_cgroup_clear_mc();
4986
	}
B
Balbir Singh 已提交
4987
}
4988
#else	/* !CONFIG_MMU */
4989
static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
4990 4991 4992
{
	return 0;
}
4993
static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
4994 4995
{
}
4996
static void mem_cgroup_move_task(void)
4997 4998 4999
{
}
#endif
B
Balbir Singh 已提交
5000

5001 5002
/*
 * Cgroup retains root cgroups across [un]mount cycles making it necessary
5003 5004
 * to verify whether we're attached to the default hierarchy on each mount
 * attempt.
5005
 */
5006
static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
5007 5008
{
	/*
5009
	 * use_hierarchy is forced on the default hierarchy.  cgroup core
5010 5011 5012
	 * guarantees that @root doesn't have any children, so turning it
	 * on for the root memcg is enough.
	 */
5013
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5014 5015 5016
		root_mem_cgroup->use_hierarchy = true;
	else
		root_mem_cgroup->use_hierarchy = false;
5017 5018
}

5019 5020 5021
static u64 memory_current_read(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
5022 5023 5024
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
5025 5026 5027 5028 5029
}

static int memory_low_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5030
	unsigned long low = READ_ONCE(memcg->low);
5031 5032

	if (low == PAGE_COUNTER_MAX)
5033
		seq_puts(m, "max\n");
5034 5035 5036 5037 5038 5039 5040 5041 5042 5043 5044 5045 5046 5047
	else
		seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE);

	return 0;
}

static ssize_t memory_low_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long low;
	int err;

	buf = strstrip(buf);
5048
	err = page_counter_memparse(buf, "max", &low);
5049 5050 5051 5052 5053 5054 5055 5056 5057 5058 5059
	if (err)
		return err;

	memcg->low = low;

	return nbytes;
}

static int memory_high_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5060
	unsigned long high = READ_ONCE(memcg->high);
5061 5062

	if (high == PAGE_COUNTER_MAX)
5063
		seq_puts(m, "max\n");
5064 5065 5066 5067 5068 5069 5070 5071 5072 5073
	else
		seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE);

	return 0;
}

static ssize_t memory_high_write(struct kernfs_open_file *of,
				 char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5074
	unsigned long nr_pages;
5075 5076 5077 5078
	unsigned long high;
	int err;

	buf = strstrip(buf);
5079
	err = page_counter_memparse(buf, "max", &high);
5080 5081 5082 5083 5084
	if (err)
		return err;

	memcg->high = high;

5085 5086 5087 5088 5089
	nr_pages = page_counter_read(&memcg->memory);
	if (nr_pages > high)
		try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
					     GFP_KERNEL, true);

5090
	memcg_wb_domain_size_changed(memcg);
5091 5092 5093 5094 5095 5096
	return nbytes;
}

static int memory_max_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5097
	unsigned long max = READ_ONCE(memcg->memory.limit);
5098 5099

	if (max == PAGE_COUNTER_MAX)
5100
		seq_puts(m, "max\n");
5101 5102 5103 5104 5105 5106 5107 5108 5109 5110
	else
		seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);

	return 0;
}

static ssize_t memory_max_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5111 5112
	unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES;
	bool drained = false;
5113 5114 5115 5116
	unsigned long max;
	int err;

	buf = strstrip(buf);
5117
	err = page_counter_memparse(buf, "max", &max);
5118 5119 5120
	if (err)
		return err;

5121 5122 5123 5124 5125 5126 5127 5128 5129 5130 5131 5132 5133 5134 5135 5136 5137 5138 5139 5140 5141 5142 5143 5144 5145 5146 5147 5148 5149 5150
	xchg(&memcg->memory.limit, max);

	for (;;) {
		unsigned long nr_pages = page_counter_read(&memcg->memory);

		if (nr_pages <= max)
			break;

		if (signal_pending(current)) {
			err = -EINTR;
			break;
		}

		if (!drained) {
			drain_all_stock(memcg);
			drained = true;
			continue;
		}

		if (nr_reclaims) {
			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
							  GFP_KERNEL, true))
				nr_reclaims--;
			continue;
		}

		mem_cgroup_events(memcg, MEMCG_OOM, 1);
		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
			break;
	}
5151

5152
	memcg_wb_domain_size_changed(memcg);
5153 5154 5155 5156 5157 5158 5159 5160 5161 5162 5163 5164 5165 5166 5167
	return nbytes;
}

static int memory_events_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));

	seq_printf(m, "low %lu\n", mem_cgroup_read_events(memcg, MEMCG_LOW));
	seq_printf(m, "high %lu\n", mem_cgroup_read_events(memcg, MEMCG_HIGH));
	seq_printf(m, "max %lu\n", mem_cgroup_read_events(memcg, MEMCG_MAX));
	seq_printf(m, "oom %lu\n", mem_cgroup_read_events(memcg, MEMCG_OOM));

	return 0;
}

5168 5169 5170
static int memory_stat_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5171 5172
	unsigned long stat[MEMCG_NR_STAT];
	unsigned long events[MEMCG_NR_EVENTS];
5173 5174 5175 5176 5177 5178 5179 5180 5181 5182 5183 5184 5185
	int i;

	/*
	 * Provide statistics on the state of the memory subsystem as
	 * well as cumulative event counters that show past behavior.
	 *
	 * This list is ordered following a combination of these gradients:
	 * 1) generic big picture -> specifics and details
	 * 2) reflecting userspace activity -> reflecting kernel heuristics
	 *
	 * Current memory state:
	 */

5186 5187 5188
	tree_stat(memcg, stat);
	tree_events(memcg, events);

5189
	seq_printf(m, "anon %llu\n",
5190
		   (u64)stat[MEM_CGROUP_STAT_RSS] * PAGE_SIZE);
5191
	seq_printf(m, "file %llu\n",
5192
		   (u64)stat[MEM_CGROUP_STAT_CACHE] * PAGE_SIZE);
5193
	seq_printf(m, "kernel_stack %llu\n",
5194
		   (u64)stat[MEMCG_KERNEL_STACK_KB] * 1024);
5195 5196 5197
	seq_printf(m, "slab %llu\n",
		   (u64)(stat[MEMCG_SLAB_RECLAIMABLE] +
			 stat[MEMCG_SLAB_UNRECLAIMABLE]) * PAGE_SIZE);
5198
	seq_printf(m, "sock %llu\n",
5199
		   (u64)stat[MEMCG_SOCK] * PAGE_SIZE);
5200 5201

	seq_printf(m, "file_mapped %llu\n",
5202
		   (u64)stat[MEM_CGROUP_STAT_FILE_MAPPED] * PAGE_SIZE);
5203
	seq_printf(m, "file_dirty %llu\n",
5204
		   (u64)stat[MEM_CGROUP_STAT_DIRTY] * PAGE_SIZE);
5205
	seq_printf(m, "file_writeback %llu\n",
5206
		   (u64)stat[MEM_CGROUP_STAT_WRITEBACK] * PAGE_SIZE);
5207 5208 5209 5210 5211 5212 5213 5214 5215 5216 5217

	for (i = 0; i < NR_LRU_LISTS; i++) {
		struct mem_cgroup *mi;
		unsigned long val = 0;

		for_each_mem_cgroup_tree(mi, memcg)
			val += mem_cgroup_nr_lru_pages(mi, BIT(i));
		seq_printf(m, "%s %llu\n",
			   mem_cgroup_lru_names[i], (u64)val * PAGE_SIZE);
	}

5218 5219 5220 5221 5222
	seq_printf(m, "slab_reclaimable %llu\n",
		   (u64)stat[MEMCG_SLAB_RECLAIMABLE] * PAGE_SIZE);
	seq_printf(m, "slab_unreclaimable %llu\n",
		   (u64)stat[MEMCG_SLAB_UNRECLAIMABLE] * PAGE_SIZE);

5223 5224 5225
	/* Accumulated memory events */

	seq_printf(m, "pgfault %lu\n",
5226
		   events[MEM_CGROUP_EVENTS_PGFAULT]);
5227
	seq_printf(m, "pgmajfault %lu\n",
5228
		   events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
5229 5230 5231 5232

	return 0;
}

5233 5234 5235
static struct cftype memory_files[] = {
	{
		.name = "current",
5236
		.flags = CFTYPE_NOT_ON_ROOT,
5237 5238 5239 5240 5241 5242 5243 5244 5245 5246 5247 5248 5249 5250 5251 5252 5253 5254 5255 5256 5257 5258 5259
		.read_u64 = memory_current_read,
	},
	{
		.name = "low",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_low_show,
		.write = memory_low_write,
	},
	{
		.name = "high",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_high_show,
		.write = memory_high_write,
	},
	{
		.name = "max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_max_show,
		.write = memory_max_write,
	},
	{
		.name = "events",
		.flags = CFTYPE_NOT_ON_ROOT,
5260
		.file_offset = offsetof(struct mem_cgroup, events_file),
5261 5262
		.seq_show = memory_events_show,
	},
5263 5264 5265 5266 5267
	{
		.name = "stat",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_stat_show,
	},
5268 5269 5270
	{ }	/* terminate */
};

5271
struct cgroup_subsys memory_cgrp_subsys = {
5272
	.css_alloc = mem_cgroup_css_alloc,
5273
	.css_online = mem_cgroup_css_online,
5274
	.css_offline = mem_cgroup_css_offline,
5275
	.css_released = mem_cgroup_css_released,
5276
	.css_free = mem_cgroup_css_free,
5277
	.css_reset = mem_cgroup_css_reset,
5278 5279
	.can_attach = mem_cgroup_can_attach,
	.cancel_attach = mem_cgroup_cancel_attach,
5280
	.post_attach = mem_cgroup_move_task,
5281
	.bind = mem_cgroup_bind,
5282 5283
	.dfl_cftypes = memory_files,
	.legacy_cftypes = mem_cgroup_legacy_files,
5284
	.early_init = 0,
B
Balbir Singh 已提交
5285
};
5286

5287 5288 5289 5290 5291 5292 5293 5294 5295 5296 5297 5298 5299 5300 5301 5302 5303 5304 5305 5306 5307 5308
/**
 * mem_cgroup_low - check if memory consumption is below the normal range
 * @root: the highest ancestor to consider
 * @memcg: the memory cgroup to check
 *
 * Returns %true if memory consumption of @memcg, and that of all
 * configurable ancestors up to @root, is below the normal range.
 */
bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return false;

	/*
	 * The toplevel group doesn't have a configurable range, so
	 * it's never low when looked at directly, and it is not
	 * considered an ancestor when assessing the hierarchy.
	 */

	if (memcg == root_mem_cgroup)
		return false;

M
Michal Hocko 已提交
5309
	if (page_counter_read(&memcg->memory) >= memcg->low)
5310 5311 5312 5313 5314 5315 5316 5317
		return false;

	while (memcg != root) {
		memcg = parent_mem_cgroup(memcg);

		if (memcg == root_mem_cgroup)
			break;

M
Michal Hocko 已提交
5318
		if (page_counter_read(&memcg->memory) >= memcg->low)
5319 5320 5321 5322 5323
			return false;
	}
	return true;
}

5324 5325 5326 5327 5328 5329
/**
 * mem_cgroup_try_charge - try charging a page
 * @page: page to charge
 * @mm: mm context of the victim
 * @gfp_mask: reclaim mode
 * @memcgp: charged memcg return
5330
 * @compound: charge the page as compound or small page
5331 5332 5333 5334 5335 5336 5337 5338 5339 5340 5341 5342
 *
 * Try to charge @page to the memcg that @mm belongs to, reclaiming
 * pages according to @gfp_mask if necessary.
 *
 * Returns 0 on success, with *@memcgp pointing to the charged memcg.
 * Otherwise, an error code is returned.
 *
 * After page->mapping has been set up, the caller must finalize the
 * charge with mem_cgroup_commit_charge().  Or abort the transaction
 * with mem_cgroup_cancel_charge() in case page instantiation fails.
 */
int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
5343 5344
			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
			  bool compound)
5345 5346
{
	struct mem_cgroup *memcg = NULL;
5347
	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5348 5349 5350 5351 5352 5353 5354 5355 5356 5357 5358 5359 5360
	int ret = 0;

	if (mem_cgroup_disabled())
		goto out;

	if (PageSwapCache(page)) {
		/*
		 * Every swap fault against a single page tries to charge the
		 * page, bail as early as possible.  shmem_unuse() encounters
		 * already charged pages, too.  The USED bit is protected by
		 * the page lock, which serializes swap cache removal, which
		 * in turn serializes uncharging.
		 */
5361
		VM_BUG_ON_PAGE(!PageLocked(page), page);
5362
		if (page->mem_cgroup)
5363
			goto out;
5364

5365
		if (do_swap_account) {
5366 5367 5368 5369 5370 5371 5372 5373 5374
			swp_entry_t ent = { .val = page_private(page), };
			unsigned short id = lookup_swap_cgroup_id(ent);

			rcu_read_lock();
			memcg = mem_cgroup_from_id(id);
			if (memcg && !css_tryget_online(&memcg->css))
				memcg = NULL;
			rcu_read_unlock();
		}
5375 5376 5377 5378 5379 5380 5381 5382 5383 5384 5385 5386 5387 5388 5389 5390 5391 5392
	}

	if (!memcg)
		memcg = get_mem_cgroup_from_mm(mm);

	ret = try_charge(memcg, gfp_mask, nr_pages);

	css_put(&memcg->css);
out:
	*memcgp = memcg;
	return ret;
}

/**
 * mem_cgroup_commit_charge - commit a page charge
 * @page: page to charge
 * @memcg: memcg to charge the page to
 * @lrucare: page might be on LRU already
5393
 * @compound: charge the page as compound or small page
5394 5395 5396 5397 5398 5399 5400 5401 5402 5403 5404 5405
 *
 * Finalize a charge transaction started by mem_cgroup_try_charge(),
 * after page->mapping has been set up.  This must happen atomically
 * as part of the page instantiation, i.e. under the page table lock
 * for anonymous pages, under the page lock for page and swap cache.
 *
 * In addition, the page must not be on the LRU during the commit, to
 * prevent racing with task migration.  If it might be, use @lrucare.
 *
 * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
 */
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
5406
			      bool lrucare, bool compound)
5407
{
5408
	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5409 5410 5411 5412 5413 5414 5415 5416 5417 5418 5419 5420 5421 5422

	VM_BUG_ON_PAGE(!page->mapping, page);
	VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);

	if (mem_cgroup_disabled())
		return;
	/*
	 * Swap faults will attempt to charge the same page multiple
	 * times.  But reuse_swap_page() might have removed the page
	 * from swapcache already, so we can't check PageSwapCache().
	 */
	if (!memcg)
		return;

	commit_charge(page, memcg, lrucare);

	local_irq_disable();
	mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
	memcg_check_events(memcg, page);
	local_irq_enable();

	if (do_memsw_account() && PageSwapCache(page)) {
		swp_entry_t entry = { .val = page_private(page) };
		/*
		 * The swap entry might not get freed for a long time,
		 * let's not wait for it.  The page already received a
		 * memory+swap charge, drop the swap entry duplicate.
		 */
		mem_cgroup_uncharge_swap(entry);
	}
}

/**
 * mem_cgroup_cancel_charge - cancel a page charge
 * @page: page to charge
 * @memcg: memcg to charge the page to
 * @compound: charge the page as compound or small page
 *
 * Cancel a charge transaction started by mem_cgroup_try_charge().
 */
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
		bool compound)
{
	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;

	if (mem_cgroup_disabled())
		return;
	/*
	 * Swap faults will attempt to charge the same page multiple
	 * times.  But reuse_swap_page() might have removed the page
	 * from swapcache already, so we can't check PageSwapCache().
	 */
	if (!memcg)
		return;

	cancel_charge(memcg, nr_pages);
}
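
/*
 * Illustrative sketch only, not kernel code: the charge transaction
 * described above, as a hypothetical fault path might drive it.  The
 * "out_free_page" label and "instantiation_failed" condition are
 * placeholders.
 *
 *   struct mem_cgroup *memcg;
 *
 *   if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg, false))
 *           goto out_free_page;
 *   ...set up page->mapping, rmap, page tables...
 *   if (instantiation_failed) {
 *           mem_cgroup_cancel_charge(page, memcg, false);
 *           goto out_free_page;
 *   }
 *   mem_cgroup_commit_charge(page, memcg, false, false);
 */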

static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
			   unsigned long nr_anon, unsigned long nr_file,
			   unsigned long nr_huge, unsigned long nr_kmem,
			   struct page *dummy_page)
{
	unsigned long nr_pages = nr_anon + nr_file + nr_kmem;
	unsigned long flags;

	if (!mem_cgroup_is_root(memcg)) {
		page_counter_uncharge(&memcg->memory, nr_pages);
		if (do_memsw_account())
			page_counter_uncharge(&memcg->memsw, nr_pages);
		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && nr_kmem)
			page_counter_uncharge(&memcg->kmem, nr_kmem);
		memcg_oom_recover(memcg);
	}

	local_irq_save(flags);
	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
	__this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
	memcg_check_events(memcg, dummy_page);
	local_irq_restore(flags);

	if (!mem_cgroup_is_root(memcg))
		css_put_many(&memcg->css, nr_pages);
}

static void uncharge_list(struct list_head *page_list)
{
	struct mem_cgroup *memcg = NULL;
	unsigned long nr_anon = 0;
	unsigned long nr_file = 0;
	unsigned long nr_huge = 0;
	unsigned long nr_kmem = 0;
	unsigned long pgpgout = 0;
	struct list_head *next;
	struct page *page;

	/*
	 * Note that the list can be a single page->lru; hence the
	 * do-while loop instead of a simple list_for_each_entry().
	 */
	next = page_list->next;
	do {
		page = list_entry(next, struct page, lru);
		next = page->lru.next;

		VM_BUG_ON_PAGE(PageLRU(page), page);
		VM_BUG_ON_PAGE(page_count(page), page);

		if (!page->mem_cgroup)
			continue;

		/*
		 * Nobody should be changing or seriously looking at
		 * page->mem_cgroup at this point, we have fully
		 * exclusive access to the page.
		 */

		if (memcg != page->mem_cgroup) {
			if (memcg) {
				uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
					       nr_huge, nr_kmem, page);
				pgpgout = nr_anon = nr_file =
					nr_huge = nr_kmem = 0;
			}
			memcg = page->mem_cgroup;
		}

		if (!PageKmemcg(page)) {
			unsigned int nr_pages = 1;

			if (PageTransHuge(page)) {
				nr_pages <<= compound_order(page);
				nr_huge += nr_pages;
			}
			if (PageAnon(page))
				nr_anon += nr_pages;
			else
				nr_file += nr_pages;
			pgpgout++;
		} else {
			nr_kmem += 1 << compound_order(page);
			__ClearPageKmemcg(page);
		}

		page->mem_cgroup = NULL;
	} while (next != page_list);

	if (memcg)
		uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
			       nr_huge, nr_kmem, page);
}

/**
 * mem_cgroup_uncharge - uncharge a page
 * @page: page to uncharge
 *
 * Uncharge a page previously charged with mem_cgroup_try_charge() and
 * mem_cgroup_commit_charge().
 */
void mem_cgroup_uncharge(struct page *page)
{
	if (mem_cgroup_disabled())
		return;

	/* Don't touch page->lru of any random page, pre-check: */
	if (!page->mem_cgroup)
		return;

	INIT_LIST_HEAD(&page->lru);
	uncharge_list(&page->lru);
}

/**
 * mem_cgroup_uncharge_list - uncharge a list of pages
 * @page_list: list of pages to uncharge
 *
 * Uncharge a list of pages previously charged with
 * mem_cgroup_try_charge() and mem_cgroup_commit_charge().
 */
void mem_cgroup_uncharge_list(struct list_head *page_list)
{
	if (mem_cgroup_disabled())
		return;

	if (!list_empty(page_list))
		uncharge_list(page_list);
}
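
/*
 * Illustrative sketch only, not kernel code: release-time uncharging,
 * assuming a hypothetical free path that has already made sure the pages
 * are unmapped and have no remaining references.  A single page:
 *
 *   mem_cgroup_uncharge(page);
 *
 * A batch of pages linked through page->lru:
 *
 *   mem_cgroup_uncharge_list(&pages_to_free);
 */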

/**
 * mem_cgroup_migrate - charge a page's replacement
 * @oldpage: currently circulating page
 * @newpage: replacement page
 *
 * Charge @newpage as a replacement page for @oldpage. @oldpage will
 * be uncharged upon free.
 *
 * Both pages must be locked, @newpage->mapping must be set up.
 */
void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
{
	struct mem_cgroup *memcg;
	unsigned int nr_pages;
	bool compound;
	unsigned long flags;

	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
	VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
	VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
		       newpage);

	if (mem_cgroup_disabled())
		return;

	/* Page cache replacement: new page already charged? */
	if (newpage->mem_cgroup)
		return;

	/* Swapcache readahead pages can get replaced before being charged */
	memcg = oldpage->mem_cgroup;
	if (!memcg)
		return;

	/* Force-charge the new page. The old one will be freed soon */
	compound = PageTransHuge(newpage);
	nr_pages = compound ? hpage_nr_pages(newpage) : 1;

	page_counter_charge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_charge(&memcg->memsw, nr_pages);
	css_get_many(&memcg->css, nr_pages);

	commit_charge(newpage, memcg, false);

	local_irq_save(flags);
	mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
	memcg_check_events(memcg, newpage);
	local_irq_restore(flags);
}
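
/*
 * Illustrative sketch only, not kernel code: a cache-replacement path,
 * loosely modeled on replace_page_cache_page(), with both pages locked and
 * newpage->mapping already set up:
 *
 *   mem_cgroup_migrate(oldpage, newpage);
 *   ...unlock both pages and drop the extra reference on oldpage...
 */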

DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
EXPORT_SYMBOL(memcg_sockets_enabled_key);

void mem_cgroup_sk_alloc(struct sock *sk)
{
	struct mem_cgroup *memcg;

	if (!mem_cgroup_sockets_enabled)
		return;

	/*
	 * Socket cloning can throw us here with sk_memcg already
	 * filled. It won't, however, necessarily happen from
	 * process context. So the test for root memcg given
	 * the current task's memcg won't help us in this case.
	 *
	 * Respecting the original socket's memcg is a better
	 * decision in this case.
	 */
	if (sk->sk_memcg) {
		BUG_ON(mem_cgroup_is_root(sk->sk_memcg));
		css_get(&sk->sk_memcg->css);
		return;
	}

	rcu_read_lock();
	memcg = mem_cgroup_from_task(current);
	if (memcg == root_mem_cgroup)
		goto out;
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
		goto out;
	if (css_tryget_online(&memcg->css))
		sk->sk_memcg = memcg;
out:
	rcu_read_unlock();
}

void mem_cgroup_sk_free(struct sock *sk)
{
	if (sk->sk_memcg)
		css_put(&sk->sk_memcg->css);
}
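
/*
 * Illustrative sketch only, not kernel code: the socket allocation and
 * destruction paths (e.g. in net/core/sock.c) are expected to pair these
 * helpers:
 *
 *   mem_cgroup_sk_alloc(sk);   when the socket is created or cloned
 *   mem_cgroup_sk_free(sk);    when the socket is destroyed
 */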

/**
 * mem_cgroup_charge_skmem - charge socket memory
 * @memcg: memcg to charge
 * @nr_pages: number of pages to charge
 *
 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
 * @memcg's configured limit, %false if the charge had to be forced.
 */
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	gfp_t gfp_mask = GFP_KERNEL;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
		struct page_counter *fail;

		if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
			memcg->tcpmem_pressure = 0;
			return true;
		}
		page_counter_charge(&memcg->tcpmem, nr_pages);
		memcg->tcpmem_pressure = 1;
		return false;
	}

	/* Don't block in the packet receive path */
	if (in_softirq())
		gfp_mask = GFP_NOWAIT;

	this_cpu_add(memcg->stat->count[MEMCG_SOCK], nr_pages);

	if (try_charge(memcg, gfp_mask, nr_pages) == 0)
		return true;

	try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
	return false;
}

/**
 * mem_cgroup_uncharge_skmem - uncharge socket memory
 * @memcg: memcg to uncharge
 * @nr_pages: number of pages to uncharge
 */
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
		page_counter_uncharge(&memcg->tcpmem, nr_pages);
		return;
	}

	this_cpu_sub(memcg->stat->count[MEMCG_SOCK], nr_pages);

	page_counter_uncharge(&memcg->memory, nr_pages);
	css_put_many(&memcg->css, nr_pages);
}
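
/*
 * Illustrative sketch only, not kernel code: how a protocol might account
 * buffer growth against the owning socket's memcg.  "amt" (in pages) and
 * the pressure handling are placeholders.
 *
 *   if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *       !mem_cgroup_charge_skmem(sk->sk_memcg, amt))
 *           ...treat the socket as under memory pressure...
 *
 * The matching release path would call:
 *
 *   mem_cgroup_uncharge_skmem(sk->sk_memcg, amt);
 */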

static int __init cgroup_memory(char *s)
{
	char *token;

	while ((token = strsep(&s, ",")) != NULL) {
		if (!*token)
			continue;
		if (!strcmp(token, "nosocket"))
			cgroup_memory_nosocket = true;
		if (!strcmp(token, "nokmem"))
			cgroup_memory_nokmem = true;
	}
	return 0;
}
__setup("cgroup.memory=", cgroup_memory);

/*
 * subsys_initcall() for memory controller.
 *
 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
 * basically everything that doesn't depend on a specific mem_cgroup structure
 * should be initialized from here.
 */
static int __init mem_cgroup_init(void)
{
	int cpu, node;

#ifndef CONFIG_SLOB
	/*
	 * Kmem cache creation is mostly done with the slab_mutex held,
	 * so use a special workqueue to avoid stalling all worker
	 * threads in case lots of cgroups are created simultaneously.
	 */
	memcg_kmem_cache_create_wq =
		alloc_ordered_workqueue("memcg_kmem_cache_create", 0);
	BUG_ON(!memcg_kmem_cache_create_wq);
#endif

	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
				  memcg_hotplug_cpu_dead);

	for_each_possible_cpu(cpu)
		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
			  drain_local_stock);

	for_each_node(node) {
		struct mem_cgroup_tree_per_node *rtpn;

		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
				    node_online(node) ? node : NUMA_NO_NODE);

		rtpn->rb_root = RB_ROOT;
		spin_lock_init(&rtpn->lock);
		soft_limit_tree.rb_tree_per_node[node] = rtpn;
	}

	return 0;
}
subsys_initcall(mem_cgroup_init);

#ifdef CONFIG_MEMCG_SWAP
static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
{
	while (!atomic_inc_not_zero(&memcg->id.ref)) {
		/*
		 * The root cgroup cannot be destroyed, so its refcount must
		 * always be >= 1.
		 */
		if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
			VM_BUG_ON(1);
			break;
		}
		memcg = parent_mem_cgroup(memcg);
		if (!memcg)
			memcg = root_mem_cgroup;
	}
	return memcg;
}

/**
 * mem_cgroup_swapout - transfer a memsw charge to swap
 * @page: page whose memsw charge to transfer
 * @entry: swap entry to move the charge to
 *
 * Transfer the memsw charge of @page to @entry.
 */
void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
	struct mem_cgroup *memcg, *swap_memcg;
	unsigned short oldid;

	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(page_count(page), page);

	if (!do_memsw_account())
		return;

	memcg = page->mem_cgroup;

	/* Readahead page, never charged */
	if (!memcg)
		return;

	/*
	 * In case the memcg owning these pages has been offlined and doesn't
	 * have an ID allocated to it anymore, charge the closest online
	 * ancestor for the swap instead and transfer the memory+swap charge.
	 */
	swap_memcg = mem_cgroup_id_get_online(memcg);
	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg));
	VM_BUG_ON_PAGE(oldid, page);
	mem_cgroup_swap_statistics(swap_memcg, true);

	page->mem_cgroup = NULL;

	if (!mem_cgroup_is_root(memcg))
		page_counter_uncharge(&memcg->memory, 1);

	if (memcg != swap_memcg) {
		if (!mem_cgroup_is_root(swap_memcg))
			page_counter_charge(&swap_memcg->memsw, 1);
		page_counter_uncharge(&memcg->memsw, 1);
	}

	/*
	 * Interrupts should be disabled here because the caller holds the
	 * mapping->tree_lock, which is taken with interrupts disabled. It is
	 * important to keep interrupts disabled here because that is the
	 * only synchronisation we have for updating the per-CPU variables.
	 */
	VM_BUG_ON(!irqs_disabled());
	mem_cgroup_charge_statistics(memcg, page, false, -1);
	memcg_check_events(memcg, page);

	if (!mem_cgroup_is_root(memcg))
		css_put(&memcg->css);
}
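
/*
 * Illustrative sketch only, not kernel code: mem_cgroup_swapout() runs in
 * a reclaim context such as __remove_mapping(), with mapping->tree_lock
 * held and interrupts disabled:
 *
 *   swp_entry_t swap = { .val = page_private(page) };
 *
 *   mem_cgroup_swapout(page, swap);
 *   __delete_from_swap_cache(page);
 */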

/**
 * mem_cgroup_try_charge_swap - try charging a swap entry
 * @page: page being added to swap
 * @entry: swap entry to charge
 *
 * Try to charge @entry to the memcg that @page belongs to.
 *
 * Returns 0 on success, -ENOMEM on failure.
 */
int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
{
	struct mem_cgroup *memcg;
	struct page_counter *counter;
	unsigned short oldid;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account)
		return 0;

	memcg = page->mem_cgroup;

	/* Readahead page, never charged */
	if (!memcg)
		return 0;

	memcg = mem_cgroup_id_get_online(memcg);

	if (!mem_cgroup_is_root(memcg) &&
	    !page_counter_try_charge(&memcg->swap, 1, &counter)) {
		mem_cgroup_id_put(memcg);
		return -ENOMEM;
	}

	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
	VM_BUG_ON_PAGE(oldid, page);
	mem_cgroup_swap_statistics(memcg, true);

	return 0;
}
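
/*
 * Illustrative sketch only, not kernel code: a swap-out path, loosely
 * modeled on add_to_swap(), charges the entry before inserting the page
 * into the swap cache and releases it again on failure:
 *
 *   entry = get_swap_page();
 *   if (!entry.val)
 *           return 0;
 *   if (mem_cgroup_try_charge_swap(page, entry)) {
 *           swapcache_free(entry);
 *           return 0;
 *   }
 */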

/**
 * mem_cgroup_uncharge_swap - uncharge a swap entry
 * @entry: swap entry to uncharge
 *
 * Drop the swap charge associated with @entry.
 */
void mem_cgroup_uncharge_swap(swp_entry_t entry)
{
	struct mem_cgroup *memcg;
	unsigned short id;

	if (!do_swap_account)
		return;

	id = swap_cgroup_record(entry, 0);
	rcu_read_lock();
	memcg = mem_cgroup_from_id(id);
	if (memcg) {
		if (!mem_cgroup_is_root(memcg)) {
			if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
				page_counter_uncharge(&memcg->swap, 1);
			else
				page_counter_uncharge(&memcg->memsw, 1);
		}
		mem_cgroup_swap_statistics(memcg, false);
		mem_cgroup_id_put(memcg);
	}
	rcu_read_unlock();
}

long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	long nr_swap_pages = get_nr_swap_pages();

	if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return nr_swap_pages;
	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
		nr_swap_pages = min_t(long, nr_swap_pages,
				      READ_ONCE(memcg->swap.limit) -
				      page_counter_read(&memcg->swap));
	return nr_swap_pages;
}

bool mem_cgroup_swap_full(struct page *page)
{
	struct mem_cgroup *memcg;

	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (vm_swap_full())
		return true;
	if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return false;

	memcg = page->mem_cgroup;
	if (!memcg)
		return false;

	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
		if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.limit)
			return true;

	return false;
}

/* to remember the boot option */
#ifdef CONFIG_MEMCG_SWAP_ENABLED
static int really_do_swap_account __initdata = 1;
#else
static int really_do_swap_account __initdata;
#endif

static int __init enable_swap_account(char *s)
{
	if (!strcmp(s, "1"))
		really_do_swap_account = 1;
	else if (!strcmp(s, "0"))
		really_do_swap_account = 0;
	return 1;
}
__setup("swapaccount=", enable_swap_account);

static u64 swap_current_read(struct cgroup_subsys_state *css,
			     struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
}

static int swap_max_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
	unsigned long max = READ_ONCE(memcg->swap.limit);

	if (max == PAGE_COUNTER_MAX)
		seq_puts(m, "max\n");
	else
		seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);

	return 0;
}

static ssize_t swap_max_write(struct kernfs_open_file *of,
			      char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long max;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &max);
	if (err)
		return err;

	mutex_lock(&memcg_limit_mutex);
	err = page_counter_limit(&memcg->swap, max);
	mutex_unlock(&memcg_limit_mutex);
	if (err)
		return err;

	return nbytes;
}

static struct cftype swap_files[] = {
	{
		.name = "swap.current",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = swap_current_read,
	},
	{
		.name = "swap.max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = swap_max_show,
		.write = swap_max_write,
	},
	{ }	/* terminate */
};
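
/*
 * Example (cgroup2 interface): with the memory controller enabled, the
 * files above appear in every non-root cgroup directory, e.g. for a
 * hypothetical group "foo" under a /sys/fs/cgroup mount:
 *
 *   cat /sys/fs/cgroup/foo/memory.swap.current
 *   echo 512M > /sys/fs/cgroup/foo/memory.swap.max
 */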

static struct cftype memsw_cgroup_files[] = {
	{
		.name = "memsw.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.failcnt",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{ },	/* terminate */
};

static int __init mem_cgroup_swap_init(void)
{
	if (!mem_cgroup_disabled() && really_do_swap_account) {
		do_swap_account = 1;
		WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys,
					       swap_files));
		WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
						  memsw_cgroup_files));
	}
	return 0;
}
subsys_initcall(mem_cgroup_swap_init);

#endif /* CONFIG_MEMCG_SWAP */