/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include <linux/tracehook.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"

#include <asm/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;

#define MEM_CGROUP_RECLAIM_RETRIES	5

/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket;

/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem;

/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
int do_swap_account __read_mostly;
#else
#define do_swap_account		0
#endif

/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
}
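
/*
 * In other words, the combined memory+swap counter is only consulted on
 * the legacy (v1) hierarchy, and only while swap accounting is enabled.
 */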

static const char * const mem_cgroup_stat_names[] = {
	"cache",
	"rss",
	"rss_huge",
	"mapped_file",
	"dirty",
	"writeback",
	"swap",
};

static const char * const mem_cgroup_events_names[] = {
	"pgpgin",
	"pgpgout",
	"pgfault",
	"pgmajfault",
};

static const char * const mem_cgroup_lru_names[] = {
	"inactive_anon",
	"active_anon",
	"inactive_file",
	"active_file",
	"unevictable",
};

#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024
#define NUMAINFO_EVENTS_TARGET	1024

/*
 * Cgroups above their limits are maintained in an RB-tree, independent of
 * their hierarchy representation.
 */

struct mem_cgroup_tree_per_node {
	struct rb_root rb_root;
	spinlock_t lock;
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these is stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * register_event() callback will be used to add a new userspace
	 * waiter for changes related to this event.  Use eventfd_signal()
	 * on eventfd to send notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace closes
	 * the eventfd or when the cgroup is removed.  This callback must be
	 * set if you want to provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below are needed to unregister the event when
	 * userspace closes the eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_t wait;
	struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/* Stuff for moving charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON	0x1U
#define MOVE_FILE	0x2U
#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mm_struct  *mm;
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_ANON,
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
	_TCP,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for OOM notifier */
#define OOM_CONTROL		(0)
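
/*
 * Example: a cft->private value built as MEMFILE_PRIVATE(_MEMSWAP, 2)
 * carries the resource type in the upper 16 bits and the attribute in
 * the lower 16 bits, so MEMFILE_TYPE() recovers _MEMSWAP and
 * MEMFILE_ATTR() recovers 2.
 */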

250 251 252 253 254 255 256 257 258 259 260 261 262
/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

263 264 265 266 267
static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

#ifndef CONFIG_SLOB
/*
 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
 * The main reason for not using cgroup id for this:
 *  this works better in sparse environments, where we have a lot of memcgs,
 *  but only a few kmem-limited. Or also, if we have, for instance, 200
 *  memcgs, and none but the 200th is kmem-limited, we'd have to have a
 *  200 entry array for that.
 *
 * The current size of the caches array is stored in memcg_nr_cache_ids. It
 * will double each time we have to increase it.
 */
static DEFINE_IDA(memcg_cache_ida);
int memcg_nr_cache_ids;

/* Protects memcg_nr_cache_ids */
static DECLARE_RWSEM(memcg_cache_ids_sem);

void memcg_get_cache_ids(void)
{
	down_read(&memcg_cache_ids_sem);
}

void memcg_put_cache_ids(void)
{
	up_read(&memcg_cache_ids_sem);
}

/*
 * MIN_SIZE is different from 1, because we would like to avoid going through
 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 * cgroups is a reasonable guess. In the future, it could be a parameter or
 * tunable, but that is strictly not necessary.
 *
 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
 * this constant directly from cgroup, but it is understandable that this is
 * better kept as an internal representation in cgroup.c. In any case, the
 * cgrp_id space is not getting any smaller, and we don't have to necessarily
 * increase ours as well if it increases.
 */
#define MEMCG_CACHES_MIN_SIZE 4
#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
 * conditional on this static branch, we'll have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
EXPORT_SYMBOL(memcg_kmem_enabled_key);

#endif /* !CONFIG_SLOB */

/**
 * mem_cgroup_css_from_page - css of the memcg associated with a page
 * @page: page of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @page is returned.  The returned css remains associated with @page
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
{
	struct mem_cgroup *memcg;

	memcg = page->mem_cgroup;

	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg = root_mem_cgroup;

	return &memcg->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it only should be used by callers that
 * do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
	memcg = READ_ONCE(page->mem_cgroup);
	while (memcg && !(memcg->css.flags & CSS_ONLINE))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}

static struct mem_cgroup_per_node *
mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);

	return memcg->nodeinfo[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_node(int nid)
{
	return soft_limit_tree.rb_tree_per_node[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);

	return soft_limit_tree.rb_tree_per_node[nid];
}

static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz,
					 unsigned long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_node *mz_node;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess)
			p = &(*p)->rb_left;
		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}
	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz)
{
	if (!mz->on_tree)
		return;
	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
				       struct mem_cgroup_tree_per_node *mctz)
{
	unsigned long flags;

	spin_lock_irqsave(&mctz->lock, flags);
	__mem_cgroup_remove_exceeded(mz, mctz);
	spin_unlock_irqrestore(&mctz->lock, flags);
}

static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
	unsigned long nr_pages = page_counter_read(&memcg->memory);
	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
	unsigned long excess = 0;

	if (nr_pages > soft_limit)
		excess = nr_pages - soft_limit;

	return excess;
}
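
/*
 * Example: with memcg->memory at 1536 pages and memcg->soft_limit at
 * 1024 pages, soft_limit_excess() above returns 512; at or below the
 * soft limit it returns 0.
 */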

static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
460
	unsigned long excess;
461 462
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup_tree_per_node *mctz;
463

464
	mctz = soft_limit_tree_from_page(page);
465 466 467 468 469
	/*
	 * Necessary to update all ancestors when hierarchy is used.
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
470
		mz = mem_cgroup_page_nodeinfo(memcg, page);
471
		excess = soft_limit_excess(memcg);
472 473 474 475 476
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
477 478 479
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
480 481
			/* if on-tree, remove it */
			if (mz->on_tree)
482
				__mem_cgroup_remove_exceeded(mz, mctz);
483 484 485 486
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
487
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
488
			spin_unlock_irqrestore(&mctz->lock, flags);
489 490 491 492 493 494
		}
	}
}
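
/*
 * Summary of the soft limit machinery above: whenever a memcg or one of
 * its ancestors is over its soft limit, mem_cgroup_update_tree() keeps
 * the per-node RB-tree keyed by usage_in_excess up to date, and soft
 * limit reclaim later picks the worst offender via
 * mem_cgroup_largest_soft_limit_node() below.
 */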

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
495 496 497
	struct mem_cgroup_tree_per_node *mctz;
	struct mem_cgroup_per_node *mz;
	int nid;
498

499
	for_each_node(nid) {
500 501 502
		mz = mem_cgroup_nodeinfo(memcg, nid);
		mctz = soft_limit_tree_node(nid);
		mem_cgroup_remove_exceeded(mz, mctz);
503 504 505
	}
}

506 507
static struct mem_cgroup_per_node *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
508 509
{
	struct rb_node *rightmost = NULL;
510
	struct mem_cgroup_per_node *mz;
511 512 513 514 515 516 517

retry:
	mz = NULL;
	rightmost = rb_last(&mctz->rb_root);
	if (!rightmost)
		goto done;		/* Nothing to reclaim from */

518
	mz = rb_entry(rightmost, struct mem_cgroup_per_node, tree_node);
	/*
	 * Remove the node now but someone else can add it back,
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz, mctz);
525
	if (!soft_limit_excess(mz->memcg) ||
526
	    !css_tryget_online(&mz->memcg->css))
527 528 529 530 531
		goto retry;
done:
	return mz;
}

532 533
static struct mem_cgroup_per_node *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
534
{
535
	struct mem_cgroup_per_node *mz;
536

537
	spin_lock_irq(&mctz->lock);
538
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
539
	spin_unlock_irq(&mctz->lock);
540 541 542
	return mz;
}

/*
 * Return the page count for a single (non-recursive) @memcg.
 *
 * Implementation note: reading percpu statistics for memcg.
 *
 * Both vmstat[] and percpu_counter have thresholds and do periodic
 * synchronization to implement "quick" reads. There is a trade-off between
 * reading cost and precision of the value, so we may eventually implement
 * a periodic synchronization of the counters in memcg as well.
 *
 * But this _read() function is used for the user interface now. The user
 * accounts memory usage by memory cgroup and _always_ requires an exact
 * value, because memory is being accounted. Even if we provided a
 * quick-and-fuzzy read, we would always have to visit all online cpus and
 * compute the sum. So, for now, the extra synchronization is not
 * implemented (it is only done for cpu hotplug).
 *
 * If there are kernel-internal users that could make use of a not-exact
 * value, and reading all cpu values becomes a performance bottleneck in
 * some common workload, thresholds and synchronization as in vmstat[]
 * should be implemented.
 */
static unsigned long
mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx)
{
	long val = 0;
	int cpu;

	/* Per-cpu values can be negative, use a signed accumulator */
	for_each_possible_cpu(cpu)
		val += per_cpu(memcg->stat->count[idx], cpu);
	/*
	 * Summing races with updates, so val may be negative.  Avoid exposing
	 * transient negative values.
	 */
	if (val < 0)
		val = 0;
	return val;
}
582
static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
583 584 585 586 587
					    enum mem_cgroup_events_index idx)
{
	unsigned long val = 0;
	int cpu;

588
	for_each_possible_cpu(cpu)
589
		val += per_cpu(memcg->stat->events[idx], cpu);
590 591 592
	return val;
}

593
static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
594
					 struct page *page,
595
					 bool compound, int nr_pages)
596
{
597 598 599 600
	/*
	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
	 * counted as CACHE even if it's on ANON LRU.
	 */
601
	if (PageAnon(page))
602
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
603
				nr_pages);
604
	else
605
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
606
				nr_pages);
607

608 609
	if (compound) {
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
610 611
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
				nr_pages);
612
	}
613

614 615
	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
616
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
617
	else {
618
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
619 620
		nr_pages = -nr_pages; /* for event */
	}
621

622
	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
623 624
}

625 626
unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
					   int nid, unsigned int lru_mask)
627
{
628
	unsigned long nr = 0;
629 630
	struct mem_cgroup_per_node *mz;
	enum lru_list lru;
631

632
	VM_BUG_ON((unsigned)nid >= nr_node_ids);
633

634 635 636 637 638
	for_each_lru(lru) {
		if (!(BIT(lru) & lru_mask))
			continue;
		mz = mem_cgroup_nodeinfo(memcg, nid);
		nr += mz->lru_size[lru];
639 640
	}
	return nr;
641
}
642

643
static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
644
			unsigned int lru_mask)
645
{
646
	unsigned long nr = 0;
647
	int nid;
648

649
	for_each_node_state(nid, N_MEMORY)
650 651
		nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
	return nr;
652 653
}
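
/*
 * Example: mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_FILE) |
 * BIT(LRU_ACTIVE_FILE)) sums the file LRU pages charged to @memcg across
 * all N_MEMORY nodes; the LRU_ALL_FILE and LRU_ALL_ANON masks used
 * further down work the same way.
 */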

654 655
static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
656 657 658
{
	unsigned long val, next;

659
	val = __this_cpu_read(memcg->stat->nr_page_events);
660
	next = __this_cpu_read(memcg->stat->targets[target]);
661
	/* from time_after() in jiffies.h */
662 663 664 665 666
	if ((long)next - (long)val < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
667 668 669
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
670 671 672 673 674 675 676 677
		case MEM_CGROUP_TARGET_NUMAINFO:
			next = val + NUMAINFO_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->stat->targets[target], next);
		return true;
678
	}
679
	return false;
680 681 682 683 684 685
}
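
/*
 * Example: with THRESHOLDS_EVENTS_TARGET at 128, the threshold handling
 * in memcg_check_events() below fires roughly at most once per 128 page
 * events on a given CPU; soft limit tree updates and NUMA info refreshes
 * are rate limited the same way by the larger SOFTLIMIT_EVENTS_TARGET
 * and NUMAINFO_EVENTS_TARGET windows.
 */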

/*
 * Check events in order.
 *
 */
686
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
687 688
{
	/* threshold event is triggered in finer grain than soft limit */
689 690
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
691
		bool do_softlimit;
692
		bool do_numainfo __maybe_unused;
693

694 695
		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
696 697 698 699
#if MAX_NUMNODES > 1
		do_numainfo = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_NUMAINFO);
#endif
700
		mem_cgroup_threshold(memcg);
701 702
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
703
#if MAX_NUMNODES > 1
704
		if (unlikely(do_numainfo))
705
			atomic_inc(&memcg->numainfo_events);
706
#endif
707
	}
708 709
}

710
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
711
{
712 713 714 715 716 717 718 719
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

720
	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
721
}
EXPORT_SYMBOL(mem_cgroup_from_task);
723

724
static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
725
{
726
	struct mem_cgroup *memcg = NULL;
727

728 729
	rcu_read_lock();
	do {
		/*
		 * Page cache insertions can happen without an
		 * actual mm context, e.g. during disk probing
		 * on boot, loopback IO, acct() writes etc.
		 */
		if (unlikely(!mm))
736
			memcg = root_mem_cgroup;
737 738 739 740 741
		else {
			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
			if (unlikely(!memcg))
				memcg = root_mem_cgroup;
		}
742
	} while (!css_tryget_online(&memcg->css));
743
	rcu_read_unlock();
744
	return memcg;
745 746
}
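
/*
 * Note: the reference obtained above via css_tryget_online() must be
 * dropped by the caller with css_put(&memcg->css) once it is done with
 * the returned memcg.
 */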

747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763
/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a zone and a priority level in @reclaim to
 * divide up the memcgs in the hierarchy among all concurrent
 * reclaimers operating on the same zone and priority.
 */
764
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
765
				   struct mem_cgroup *prev,
766
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
769
	struct cgroup_subsys_state *css = NULL;
770
	struct mem_cgroup *memcg = NULL;
771
	struct mem_cgroup *pos = NULL;
772

773 774
	if (mem_cgroup_disabled())
		return NULL;
775

776 777
	if (!root)
		root = root_mem_cgroup;

779
	if (prev && !reclaim)
780
		pos = prev;

782 783
	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
784
			goto out;
785
		return root;
786
	}

788
	rcu_read_lock();

790
	if (reclaim) {
791
		struct mem_cgroup_per_node *mz;
792

793
		mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
794 795 796 797 798
		iter = &mz->iter[reclaim->priority];

		if (prev && reclaim->generation != iter->generation)
			goto out_unlock;

799
		while (1) {
800
			pos = READ_ONCE(iter->position);
801 802
			if (!pos || css_tryget(&pos->css))
				break;
803
			/*
804 805 806 807 808 809
			 * css reference reached zero, so iter->position will
			 * be cleared by ->css_released. However, we should not
			 * rely on this happening soon, because ->css_released
			 * is called from a work queue, and by busy-waiting we
			 * might block it. So we clear iter->position right
			 * away.
810
			 */
811 812
			(void)cmpxchg(&iter->position, pos, NULL);
		}
813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
830
		}

832 833 834 835 836 837
		/*
		 * Verify the css and acquire a reference.  The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		memcg = mem_cgroup_from_css(css);

839 840
		if (css == &root->css)
			break;

842 843
		if (css_tryget(css))
			break;
844

845
		memcg = NULL;
846
	}
847 848 849

	if (reclaim) {
		/*
850 851 852
		 * The position could have already been updated by a competing
		 * thread, so check that the value hasn't changed since we read
		 * it to avoid reclaiming from the same cgroup twice.
853
		 */
854 855
		(void)cmpxchg(&iter->position, pos, memcg);

856 857 858 859 860 861 862
		if (pos)
			css_put(&pos->css);

		if (!memcg)
			iter->generation++;
		else if (!prev)
			reclaim->generation = iter->generation;
863
	}
864

865 866
out_unlock:
	rcu_read_unlock();
867
out:
868 869 870
	if (prev && prev != root)
		css_put(&prev->css);

871
	return memcg;
}

874 875 876 877 878 879 880
/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
881 882 883 884 885 886
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

888 889 890 891
static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup *memcg = dead_memcg;
	struct mem_cgroup_reclaim_iter *iter;
892 893
	struct mem_cgroup_per_node *mz;
	int nid;
894 895 896 897
	int i;

	while ((memcg = parent_mem_cgroup(memcg))) {
		for_each_node(nid) {
898 899 900 901 902
			mz = mem_cgroup_nodeinfo(memcg, nid);
			for (i = 0; i <= DEF_PRIORITY; i++) {
				iter = &mz->iter[i];
				cmpxchg(&iter->position,
					dead_memcg, NULL);
903 904 905 906 907
			}
		}
	}
}

908 909 910 911 912 913
/*
 * Iteration constructs for visiting all cgroups (under a tree).  If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
914
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
915
	     iter != NULL;				\
916
	     iter = mem_cgroup_iter(root, iter, NULL))
917

918
#define for_each_mem_cgroup(iter)			\
919
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
920
	     iter != NULL;				\
921
	     iter = mem_cgroup_iter(NULL, iter, NULL))
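
/*
 * Sketch of a manual walk outside of these helpers (should_stop() is a
 * placeholder for whatever condition ends the walk early):
 *
 *	struct mem_cgroup *iter = NULL;
 *
 *	while ((iter = mem_cgroup_iter(root, iter, NULL))) {
 *		if (should_stop(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 *
 * The reference returned by mem_cgroup_iter() is either passed back in
 * on the next call or released through mem_cgroup_iter_break().
 */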

923
/**
924
 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
925
 * @page: the page
926
 * @zone: zone of the page
927 928 929 930
 *
 * This function is only safe when following the LRU page isolation
 * and putback protocol: the LRU lock must be held, and the page must
 * either be PageLRU() or the caller must have isolated/allocated it.
931
 */
struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat)
{
934
	struct mem_cgroup_per_node *mz;
935
	struct mem_cgroup *memcg;
936
	struct lruvec *lruvec;
937

938
	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->lruvec;
940 941
		goto out;
	}
942

943
	memcg = page->mem_cgroup;
944
	/*
945
	 * Swapcache readahead pages are added to the LRU - and
946
	 * possibly migrated - before they are charged.
947
	 */
948 949
	if (!memcg)
		memcg = root_mem_cgroup;
950

951
	mz = mem_cgroup_page_nodeinfo(memcg, page);
952 953 954 955 956 957 958
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->zone here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
961
	return lruvec;
}
963

964
/**
965 966 967
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @zid: Zone ID of the zone pages have been added to
969
 * @nr_pages: positive when adding or negative when removing
970
 *
971 972 973
 * This function must be called under lru_lock, just before a page is added
 * to or just after a page is removed from an lru list (that ordering being
 * so as to allow it to check that lru_size 0 is consistent with list_empty).
974
 */
975
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				enum zone_type zid, int nr_pages)
977
{
978
	struct mem_cgroup_per_node *mz;
979
	unsigned long *lru_size;
980 981
	long size;
	bool empty;
982

	__update_lru_size(lruvec, lru, zid, nr_pages);
984

985 986 987
	if (mem_cgroup_disabled())
		return;

988
	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
989
	lru_size = mz->lru_size + lru;
990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004
	empty = list_empty(lruvec->lists + lru);

	if (nr_pages < 0)
		*lru_size += nr_pages;

	size = *lru_size;
	if (WARN_ONCE(size < 0 || empty != !size,
		"%s(%p, %d, %d): lru_size %ld but %sempty\n",
		__func__, lruvec, lru, nr_pages, size, empty ? "" : "not ")) {
		VM_BUG_ON(1);
		*lru_size = 0;
	}

	if (nr_pages > 0)
		*lru_size += nr_pages;
}
1006

1007
bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
1008
{
1009
	struct mem_cgroup *task_memcg;
1010
	struct task_struct *p;
1011
	bool ret;
1012

1013
	p = find_lock_task_mm(task);
1014
	if (p) {
1015
		task_memcg = get_mem_cgroup_from_mm(p->mm);
1016 1017 1018 1019 1020 1021 1022
		task_unlock(p);
	} else {
		/*
		 * All threads may have already detached their mm's, but the oom
		 * killer still needs to detect if they have already been oom
		 * killed to prevent needlessly killing additional tasks.
		 */
1023
		rcu_read_lock();
1024 1025
		task_memcg = mem_cgroup_from_task(task);
		css_get(&task_memcg->css);
1026
		rcu_read_unlock();
1027
	}
1028 1029
	ret = mem_cgroup_is_descendant(task_memcg, memcg);
	css_put(&task_memcg->css);
1030 1031 1032
	return ret;
}

1033
/**
1034
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
W
Wanpeng Li 已提交
1035
 * @memcg: the memory cgroup
1036
 *
1037
 * Returns the maximum amount of memory @mem can be charged with, in
1038
 * pages.
1039
 */
1040
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1041
{
1042 1043 1044
	unsigned long margin = 0;
	unsigned long count;
	unsigned long limit;
1045

1046
	count = page_counter_read(&memcg->memory);
1047
	limit = READ_ONCE(memcg->memory.limit);
1048 1049 1050
	if (count < limit)
		margin = limit - count;

1051
	if (do_memsw_account()) {
1052
		count = page_counter_read(&memcg->memsw);
1053
		limit = READ_ONCE(memcg->memsw.limit);
1054 1055
		if (count <= limit)
			margin = min(margin, limit - count);
1056 1057
		else
			margin = 0;
1058 1059 1060
	}

	return margin;
1061 1062
}

/*
 * A routine for checking whether "mem" is under move_account() or not.
 *
 * Checking whether a cgroup is mc.from, mc.to, or under the hierarchy of
 * a moving cgroup. This is used for waiting at high memory pressure
 * caused by "move".
 */
static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1071
{
1072 1073
	struct mem_cgroup *from;
	struct mem_cgroup *to;
1074
	bool ret = false;
1075 1076 1077 1078 1079 1080 1081 1082 1083
	/*
	 * Unlike task_move routines, we access mc.to, mc.from not under
	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;
1084

1085 1086
	ret = mem_cgroup_is_descendant(from, memcg) ||
		mem_cgroup_is_descendant(to, memcg);
1087 1088
unlock:
	spin_unlock(&mc.lock);
1089 1090 1091
	return ret;
}

1092
static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1093 1094
{
	if (mc.moving_task && current != mc.moving_task) {
1095
		if (mem_cgroup_under_move(memcg)) {
1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}

1108
#define K(x) ((x) << (PAGE_SHIFT-10))
1109
/**
1110
 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
1111 1112 1113 1114 1115 1116 1117 1118
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
1119 1120
	struct mem_cgroup *iter;
	unsigned int i;
1121 1122 1123

	rcu_read_lock();

1124 1125 1126 1127 1128 1129 1130 1131
	if (p) {
		pr_info("Task in ");
		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
		pr_cont(" killed as a result of limit of ");
	} else {
		pr_info("Memory limit reached of cgroup ");
	}

T
1133
	pr_cont("\n");
1134 1135 1136

	rcu_read_unlock();

1137 1138 1139 1140 1141 1142 1143 1144 1145
	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memory)),
		K((u64)memcg->memory.limit), memcg->memory.failcnt);
	pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memsw)),
		K((u64)memcg->memsw.limit), memcg->memsw.failcnt);
	pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->kmem)),
		K((u64)memcg->kmem.limit), memcg->kmem.failcnt);
1146 1147

	for_each_mem_cgroup_tree(iter, memcg) {
		pr_info("Memory cgroup stats for ");
		pr_cont_cgroup_path(iter->css.cgroup);
1150 1151 1152
		pr_cont(":");

		for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
1153
			if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
1154
				continue;
1155
			pr_cont(" %s:%luKB", mem_cgroup_stat_names[i],
1156 1157 1158 1159 1160 1161 1162 1163 1164
				K(mem_cgroup_read_stat(iter, i)));
		}

		for (i = 0; i < NR_LRU_LISTS; i++)
			pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
				K(mem_cgroup_nr_lru_pages(iter, BIT(i))));

		pr_cont("\n");
	}
1165 1166
}

/*
 * This function returns the number of memcgs under the hierarchy tree.
 * Returns 1 (self count) if there are no children.
 */
static int mem_cgroup_count_children(struct mem_cgroup *memcg)
1172 1173
{
	int num = 0;
	struct mem_cgroup *iter;

1176
	for_each_mem_cgroup_tree(iter, memcg)
		num++;
1178 1179 1180
	return num;
}

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
1184
static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
1186
	unsigned long limit;
1187

1188
	limit = memcg->memory.limit;
1189
	if (mem_cgroup_swappiness(memcg)) {
1190
		unsigned long memsw_limit;
1191
		unsigned long swap_limit;
1192

1193
		memsw_limit = memcg->memsw.limit;
1194 1195 1196
		swap_limit = memcg->swap.limit;
		swap_limit = min(swap_limit, (unsigned long)total_swap_pages);
		limit = min(limit + swap_limit, memsw_limit);
1197 1198
	}
	return limit;
}
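
/*
 * Example: with memory.limit at 262144 pages, a nonzero swappiness,
 * plenty of swap.limit but only 131072 pages of total swap, and
 * memsw.limit at 327680 pages, the result is
 * min(262144 + 131072, 327680) = 327680 pages.
 */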

1201
static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1202
				     int order)
1203
{
1204 1205 1206
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
1207
		.memcg = memcg,
1208 1209 1210
		.gfp_mask = gfp_mask,
		.order = order,
	};
1211 1212 1213 1214 1215 1216
	struct mem_cgroup *iter;
	unsigned long chosen_points = 0;
	unsigned long totalpages;
	unsigned int points = 0;
	struct task_struct *chosen = NULL;

1217 1218
	mutex_lock(&oom_lock);

1219
	/*
1220 1221 1222
	 * If current has a pending SIGKILL or is exiting, then automatically
	 * select it.  The goal is to allow it to allocate so that it may
	 * quickly exit and free its memory.
1223
	 */
1224
	if (task_will_free_mem(current)) {
1225
		mark_oom_victim(current);
1226
		wake_oom_reaper(current);
1227
		goto unlock;
1228 1229
	}

1230
	check_panic_on_oom(&oc, CONSTRAINT_MEMCG);
1231
	totalpages = mem_cgroup_get_limit(memcg) ? : 1;
1232
	for_each_mem_cgroup_tree(iter, memcg) {
1233
		struct css_task_iter it;
1234 1235
		struct task_struct *task;

1236 1237
		css_task_iter_start(&iter->css, &it);
		while ((task = css_task_iter_next(&it))) {
1238
			switch (oom_scan_process_thread(&oc, task)) {
1239 1240 1241 1242 1243 1244 1245 1246 1247 1248
			case OOM_SCAN_SELECT:
				if (chosen)
					put_task_struct(chosen);
				chosen = task;
				chosen_points = ULONG_MAX;
				get_task_struct(chosen);
				/* fall through */
			case OOM_SCAN_CONTINUE:
				continue;
			case OOM_SCAN_ABORT:
1249
				css_task_iter_end(&it);
1250 1251 1252
				mem_cgroup_iter_break(memcg, iter);
				if (chosen)
					put_task_struct(chosen);
1253 1254
				/* Set a dummy value to return "true". */
				chosen = (void *) 1;
1255
				goto unlock;
1256 1257 1258 1259
			case OOM_SCAN_OK:
				break;
			};
			points = oom_badness(task, memcg, NULL, totalpages);
1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271
			if (!points || points < chosen_points)
				continue;
			/* Prefer thread group leaders for display purposes */
			if (points == chosen_points &&
			    thread_group_leader(chosen))
				continue;

			if (chosen)
				put_task_struct(chosen);
			chosen = task;
			chosen_points = points;
			get_task_struct(chosen);
1272
		}
1273
		css_task_iter_end(&it);
1274 1275
	}

1276 1277
	if (chosen) {
		points = chosen_points * 1000 / totalpages;
1278
		oom_kill_process(&oc, chosen, points, totalpages,
1279
				 "Memory cgroup out of memory");
1280 1281 1282
	}
unlock:
	mutex_unlock(&oom_lock);
1283
	return chosen;
1284 1285
}

1286 1287
#if MAX_NUMNODES > 1

/**
 * test_mem_cgroup_node_reclaimable
 * @memcg: the target memcg
 * @nid: the node ID to be checked.
 * @noswap: specify true here if the user wants file only information.
 *
 * This function returns whether the specified memcg contains any
 * reclaimable pages on a node. Returns true if there are any reclaimable
 * pages in the node.
 */
static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
1299 1300
		int nid, bool noswap)
{
1301
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
1302 1303 1304
		return true;
	if (noswap || !total_swap_pages)
		return false;
1305
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
1306 1307 1308 1309
		return true;
	return false;

}
1310 1311 1312 1313 1314 1315 1316

/*
 * Always updating the nodemask is not very good - even if we have an empty
 * list or the wrong list here, we can start from some node and traverse all
 * nodes based on the zonelist. So update the list loosely once per 10 secs.
 *
 */
1317
static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
1318 1319
{
	int nid;
1320 1321 1322 1323
	/*
	 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
	 * pagein/pageout changes since the last update.
	 */
1324
	if (!atomic_read(&memcg->numainfo_events))
1325
		return;
1326
	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
1327 1328 1329
		return;

	/* make a nodemask where this memcg uses memory from */
1330
	memcg->scan_nodes = node_states[N_MEMORY];
1331

1332
	for_each_node_mask(nid, node_states[N_MEMORY]) {
1333

1334 1335
		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
			node_clear(nid, memcg->scan_nodes);
1336
	}
1337

1338 1339
	atomic_set(&memcg->numainfo_events, 0);
	atomic_set(&memcg->numainfo_updating, 0);
1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353
}

/*
 * Selecting a node to start reclaim from. Because what we need is just
 * reducing the usage counter, starting from anywhere is OK. Considering
 * memory reclaim from the current node, there are pros and cons:
 *
 * Freeing memory from the current node means freeing memory from a node
 * which we'll use or have used. So, it may make the LRU bad. And if
 * several threads hit their limits, they will see contention on one node.
 * But freeing from a remote node means more costs for memory reclaim
 * because of memory latency.
 *
 * Now, we use round-robin. A better algorithm is welcome.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1355 1356 1357
{
	int node;

1358 1359
	mem_cgroup_may_update_nodemask(memcg);
	node = memcg->last_scanned_node;
1360

1361
	node = next_node_in(node, memcg->scan_nodes);
1362
	/*
	 * mem_cgroup_may_update_nodemask might have seen no reclaimable pages
	 * last time it really checked all the LRUs due to rate limiting.
	 * Fallback to the current node in that case for simplicity.
	 */
	if (unlikely(node == MAX_NUMNODES))
		node = numa_node_id();

1370
	memcg->last_scanned_node = node;
1371 1372 1373
	return node;
}
#else
1374
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1375 1376 1377 1378 1379
{
	return 0;
}
#endif

1380
static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1381
				   pg_data_t *pgdat,
1382 1383 1384 1385 1386 1387 1388 1389 1390
				   gfp_t gfp_mask,
				   unsigned long *total_scanned)
{
	struct mem_cgroup *victim = NULL;
	int total = 0;
	int loop = 0;
	unsigned long excess;
	unsigned long nr_scanned;
	struct mem_cgroup_reclaim_cookie reclaim = {
1391
		.pgdat = pgdat,
1392 1393 1394
		.priority = 0,
	};

1395
	excess = soft_limit_excess(root_memcg);
1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420

	while (1) {
		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
		if (!victim) {
			loop++;
			if (loop >= 2) {
				/*
				 * If we have not been able to reclaim
				 * anything, it might be because there are
				 * no reclaimable pages under this hierarchy.
				 */
				if (!total)
					break;
				/*
				 * We want to do more targeted reclaim.
				 * excess >> 2 is not too excessive, so we do
				 * not reclaim too much, nor so little that
				 * we keep coming back to reclaim from this
				 * cgroup.
				 */
				if (total >= (excess >> 2) ||
					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
					break;
			}
			continue;
		}
1421
		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1422
					pgdat, &nr_scanned);
1423
		*total_scanned += nr_scanned;
1424
		if (!soft_limit_excess(root_memcg))
1425
			break;
1426
	}
1427 1428
	mem_cgroup_iter_break(root_memcg, victim);
	return total;
1429 1430
}

1431 1432 1433 1434 1435 1436
#ifdef CONFIG_LOCKDEP
static struct lockdep_map memcg_oom_lock_dep_map = {
	.name = "memcg_oom_lock",
};
#endif

1437 1438
static DEFINE_SPINLOCK(memcg_oom_lock);

/*
 * Check whether the OOM killer is already running under our hierarchy.
 * If someone else is running it, return false.
 */
static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
K
KAMEZAWA Hiroyuki 已提交
1444
{
1445
	struct mem_cgroup *iter, *failed = NULL;
1446

1447 1448
	spin_lock(&memcg_oom_lock);

1449
	for_each_mem_cgroup_tree(iter, memcg) {
1450
		if (iter->oom_lock) {
1451 1452 1453 1454 1455
			/*
			 * this subtree of our hierarchy is already locked
			 * so we cannot give a lock.
			 */
			failed = iter;
1456 1457
			mem_cgroup_iter_break(memcg, iter);
			break;
1458 1459
		} else
			iter->oom_lock = true;
	}

1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472
	if (failed) {
		/*
		 * OK, we failed to lock the whole subtree so we have
		 * to clean up what we set up to the failing subtree
		 */
		for_each_mem_cgroup_tree(iter, memcg) {
			if (iter == failed) {
				mem_cgroup_iter_break(memcg, iter);
				break;
			}
			iter->oom_lock = false;
1473
		}
1474 1475
	} else
		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1476 1477 1478 1479

	spin_unlock(&memcg_oom_lock);

	return !failed;
1480
}
1481

1482
static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1483
{
	struct mem_cgroup *iter;

1486
	spin_lock(&memcg_oom_lock);
1487
	mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
1488
	for_each_mem_cgroup_tree(iter, memcg)
1489
		iter->oom_lock = false;
1490
	spin_unlock(&memcg_oom_lock);
1491 1492
}

1493
static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1494 1495 1496
{
	struct mem_cgroup *iter;

1497
	spin_lock(&memcg_oom_lock);
1498
	for_each_mem_cgroup_tree(iter, memcg)
1499 1500
		iter->under_oom++;
	spin_unlock(&memcg_oom_lock);
1501 1502
}

1503
static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1504 1505 1506
{
	struct mem_cgroup *iter;

	/*
	 * When a new child is created while the hierarchy is under oom,
1509
	 * mem_cgroup_oom_lock() may not be called. Watch for underflow.
	 */
1511
	spin_lock(&memcg_oom_lock);
1512
	for_each_mem_cgroup_tree(iter, memcg)
1513 1514 1515
		if (iter->under_oom > 0)
			iter->under_oom--;
	spin_unlock(&memcg_oom_lock);
1516 1517
}

static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);

struct oom_wait_info {
1521
	struct mem_cgroup *memcg;
	wait_queue_t	wait;
};

static int memcg_oom_wake_function(wait_queue_t *wait,
	unsigned mode, int sync, void *arg)
{
1528 1529
	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
	struct mem_cgroup *oom_wait_memcg;
	struct oom_wait_info *oom_wait_info;

	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1533
	oom_wait_memcg = oom_wait_info->memcg;

1535 1536
	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
		return 0;
	return autoremove_wake_function(wait, mode, sync, arg);
}

1541
static void memcg_oom_recover(struct mem_cgroup *memcg)
1542
{
1543 1544 1545 1546 1547 1548 1549 1550 1551
	/*
	 * For the following lockless ->under_oom test, the only required
	 * guarantee is that it must see the state asserted by an OOM when
	 * this function is called as a result of userland actions
	 * triggered by the notification of the OOM.  This is trivially
	 * achieved by invoking mem_cgroup_mark_under_oom() before
	 * triggering notification.
	 */
	if (memcg && memcg->under_oom)
1552
		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1553 1554
}

1555
static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1556
{
1557
	if (!current->memcg_may_oom)
1558
		return;
	/*
1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571
	 * We are in the middle of the charge context here, so we
	 * don't want to block when potentially sitting on a callstack
	 * that holds all kinds of filesystem and mm locks.
	 *
	 * Also, the caller may handle a failed allocation gracefully
	 * (like optional page cache readahead) and so an OOM killer
	 * invocation might not even be necessary.
	 *
	 * That's why we don't do anything here except remember the
	 * OOM context and then deal with it at the end of the page
	 * fault when the stack is unwound, the locks are released,
	 * and when we know whether the fault was overall successful.
	 */
1573
	css_get(&memcg->css);
	current->memcg_in_oom = memcg;
	current->memcg_oom_gfp_mask = mask;
	current->memcg_oom_order = order;
1577 1578 1579 1580
}

/**
 * mem_cgroup_oom_synchronize - complete memcg OOM handling
1581
 * @handle: actually kill/wait or just clean up the OOM state
1582
 *
1583 1584
 * This has to be called at the end of a page fault if the memcg OOM
 * handler was enabled.
1585
 *
1586
 * Memcg supports userspace OOM handling where failed allocations must
1587 1588 1589 1590
 * sleep on a waitqueue until the userspace task resolves the
 * situation.  Sleeping directly in the charge context with all kinds
 * of locks held is not a good idea, instead we remember an OOM state
 * in the task and mem_cgroup_oom_synchronize() has to be called at
1591
 * the end of the page fault to complete the OOM handling.
1592 1593
 *
 * Returns %true if an ongoing memcg OOM situation was detected and
1594
 * completed, %false otherwise.
1595
 */
1596
bool mem_cgroup_oom_synchronize(bool handle)
1597
{
	struct mem_cgroup *memcg = current->memcg_in_oom;
1599
	struct oom_wait_info owait;
1600
	bool locked;
1601 1602 1603

	/* OOM is global, do not handle */
	if (!memcg)
1604
		return false;
1605

1606
	if (!handle || oom_killer_disabled)
1607
		goto cleanup;
1608 1609 1610 1611 1612 1613

	owait.memcg = memcg;
	owait.wait.flags = 0;
	owait.wait.func = memcg_oom_wake_function;
	owait.wait.private = current;
	INIT_LIST_HEAD(&owait.wait.task_list);

1615
	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1616 1617 1618 1619 1620 1621 1622 1623 1624 1625
	mem_cgroup_mark_under_oom(memcg);

	locked = mem_cgroup_oom_trylock(memcg);

	if (locked)
		mem_cgroup_oom_notify(memcg);

	if (locked && !memcg->oom_kill_disable) {
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
					 current->memcg_oom_order);
1628
	} else {
1629
		schedule();
1630 1631 1632 1633 1634
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
	}

	if (locked) {
1635 1636 1637 1638 1639 1640 1641 1642
		mem_cgroup_oom_unlock(memcg);
		/*
		 * There is no guarantee that an OOM-lock contender
		 * sees the wakeups triggered by the OOM kill
		 * uncharges.  Wake any sleepers explicitely.
		 */
		memcg_oom_recover(memcg);
	}
1643
cleanup:
	current->memcg_in_oom = NULL;
1645
	css_put(&memcg->css);
	return true;
1647 1648
}

1649
/**
1650 1651
 * lock_page_memcg - lock a page->mem_cgroup binding
 * @page: the page
1652
 *
1653 1654
 * This function protects unlocked LRU pages from being moved to
 * another cgroup and stabilizes their page->mem_cgroup binding.
1655
 */
void lock_page_memcg(struct page *page)
1657 1658
{
	struct mem_cgroup *memcg;
1659
	unsigned long flags;
1660

1661 1662 1663 1664 1665
	/*
	 * The RCU lock is held throughout the transaction.  The fast
	 * path can get away without acquiring the memcg->move_lock
	 * because page moving starts with an RCU grace period.
	 */
1666 1667 1668
	rcu_read_lock();

	if (mem_cgroup_disabled())
		return;
1670
again:
1671
	memcg = page->mem_cgroup;
1672
	if (unlikely(!memcg))
		return;
1674

	if (atomic_read(&memcg->moving_account) <= 0)
		return;
1677

1678
	spin_lock_irqsave(&memcg->move_lock, flags);
1679
	if (memcg != page->mem_cgroup) {
1680
		spin_unlock_irqrestore(&memcg->move_lock, flags);
1681 1682
		goto again;
	}
1683 1684 1685 1686

	/*
	 * When charge migration first begins, we can have locked and
	 * unlocked page stat updates happening concurrently.  Track
1687
	 * the task who has the lock for unlock_page_memcg().
1688 1689 1690
	 */
	memcg->move_lock_task = current;
	memcg->move_lock_flags = flags;
1691

	return;
1693
}
1694
EXPORT_SYMBOL(lock_page_memcg);
1695

1696
/**
1697
 * unlock_page_memcg - unlock a page->mem_cgroup binding
 * @page: the page
1699
 */
void unlock_page_memcg(struct page *page)
1701
{
	struct mem_cgroup *memcg = page->mem_cgroup;

1704 1705 1706 1707 1708 1709 1710 1711
	if (memcg && memcg->move_lock_task == current) {
		unsigned long flags = memcg->move_lock_flags;

		memcg->move_lock_task = NULL;
		memcg->move_lock_flags = 0;

		spin_unlock_irqrestore(&memcg->move_lock, flags);
	}
1712

1713
	rcu_read_unlock();
1714
}
1715
EXPORT_SYMBOL(unlock_page_memcg);
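
/*
 * Typical usage of the pair above (sketch; update_page_state() stands in
 * for whatever per-memcg accounted page state is being modified):
 *
 *	lock_page_memcg(page);
 *	update_page_state(page);
 *	unlock_page_memcg(page);
 *
 * which keeps page->mem_cgroup stable for the duration of the update.
 */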

/*
 * size of first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: maybe it is necessary to use bigger numbers on big iron.
 */
#define CHARGE_BATCH	32U
1722 1723
struct memcg_stock_pcp {
	struct mem_cgroup *cached; /* this never be root cgroup */
1724
	unsigned int nr_pages;
1725
	struct work_struct work;
1726
	unsigned long flags;
1727
#define FLUSHING_CACHED_CHARGE	0
1728 1729
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
1730
static DEFINE_MUTEX(percpu_charge_mutex);
1731

/**
 * consume_stock: Try to consume stocked charge on this cpu.
 * @memcg: memcg to consume from.
 * @nr_pages: how many pages to charge.
 *
 * The charges will only happen if @memcg matches the current cpu's memcg
 * stock, and at least @nr_pages are available in that stock.  Failure to
 * service an allocation will refill the stock.
 *
 * returns true if successful, false otherwise.
1742
 */
1743
static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1744 1745
{
	struct memcg_stock_pcp *stock;
1746
	bool ret = false;
1747

1748
	if (nr_pages > CHARGE_BATCH)
1749
		return ret;
1750

1751
	stock = &get_cpu_var(memcg_stock);
1752
	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
1753
		stock->nr_pages -= nr_pages;
1754 1755
		ret = true;
	}
1756 1757 1758 1759 1760
	put_cpu_var(memcg_stock);
	return ret;
}
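
/*
 * Illustration of the fast path: a charge of a few pages first tries
 * consume_stock(); if the current CPU's stock was refilled for the same
 * memcg with enough pages, the charge completes without touching the
 * shared page counters at all. Otherwise the charging slow path
 * (try_charge()) charges the counters in CHARGE_BATCH-sized batches and
 * refill_stock() keeps the surplus for later.
 */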

/*
 * Returns stocks cached in percpu and resets cached information.
 */
static void drain_stock(struct memcg_stock_pcp *stock)
{
	struct mem_cgroup *old = stock->cached;

1767
	if (stock->nr_pages) {
1768
		page_counter_uncharge(&old->memory, stock->nr_pages);
1769
		if (do_memsw_account())
1770
			page_counter_uncharge(&old->memsw, stock->nr_pages);
1771
		css_put_many(&old->css, stock->nr_pages);
1772
		stock->nr_pages = 0;
1773 1774 1775 1776 1777 1778 1779 1780 1781 1782
	}
	stock->cached = NULL;
}

/*
 * This must be called with preemption disabled, or by a thread that is
 * pinned to the local cpu.
 */
static void drain_local_stock(struct work_struct *dummy)
{
	struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock);
	drain_stock(stock);
	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
}

/*
 * Cache @nr_pages charges in the local per-cpu area, to be consumed by
 * consume_stock() later.
 */
static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);

	if (stock->cached != memcg) { /* reset if necessary */
		drain_stock(stock);
		stock->cached = memcg;
	}
	stock->nr_pages += nr_pages;
	put_cpu_var(memcg_stock);
}
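
/*
 * How the stock is used (orientation sketch only, mirroring try_charge()
 * further down): a charge first tries consume_stock(); on a miss it charges
 * a full CHARGE_BATCH to the page counters and parks the surplus with
 * refill_stock(), so most charges never touch the shared counters:
 *
 *	if (consume_stock(memcg, nr_pages))
 *		return 0;			// fast path, per-cpu only
 *	...charge "batch" pages against memcg->memory...
 *	if (batch > nr_pages)
 *		refill_stock(memcg, batch - nr_pages);
 */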

/*
 * Drain all per-CPU charge caches for the given root_memcg and the
 * subtree of the hierarchy under it.
 */
static void drain_all_stock(struct mem_cgroup *root_memcg)
{
	int cpu, curcpu;

	/* If someone's already draining, avoid adding more workers. */
	if (!mutex_trylock(&percpu_charge_mutex))
		return;
	/* Notify other cpus that system-wide "drain" is running */
	get_online_cpus();
	curcpu = get_cpu();
	for_each_online_cpu(cpu) {
		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
		struct mem_cgroup *memcg;

		memcg = stock->cached;
		if (!memcg || !stock->nr_pages)
			continue;
		if (!mem_cgroup_is_descendant(memcg, root_memcg))
			continue;
		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
			if (cpu == curcpu)
				drain_local_stock(&stock->work);
			else
				schedule_work_on(cpu, &stock->work);
		}
	}
	put_cpu();
	put_online_cpus();
	mutex_unlock(&percpu_charge_mutex);
}

static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
					unsigned long action,
					void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	struct memcg_stock_pcp *stock;

	if (action == CPU_ONLINE)
		return NOTIFY_OK;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	stock = &per_cpu(memcg_stock, cpu);
	drain_stock(stock);
	return NOTIFY_OK;
}

static void reclaim_high(struct mem_cgroup *memcg,
			 unsigned int nr_pages,
			 gfp_t gfp_mask)
{
	do {
		if (page_counter_read(&memcg->memory) <= memcg->high)
			continue;
		mem_cgroup_events(memcg, MEMCG_HIGH, 1);
		try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
	} while ((memcg = parent_mem_cgroup(memcg)));
}

static void high_work_func(struct work_struct *work)
{
	struct mem_cgroup *memcg;

	memcg = container_of(work, struct mem_cgroup, high_work);
	reclaim_high(memcg, CHARGE_BATCH, GFP_KERNEL);
}

/*
 * Scheduled by try_charge() to be executed from the userland return path
 * and reclaims memory over the high limit.
 */
void mem_cgroup_handle_over_high(void)
{
	unsigned int nr_pages = current->memcg_nr_pages_over_high;
	struct mem_cgroup *memcg;

	if (likely(!nr_pages))
		return;

	memcg = get_mem_cgroup_from_mm(current->mm);
	reclaim_high(memcg, nr_pages, GFP_KERNEL);
	css_put(&memcg->css);
	current->memcg_nr_pages_over_high = 0;
}
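
/*
 * Call-site sketch (an assumption, not verbatim from the kernel sources):
 * set_notify_resume() in try_charge() below arranges for the task to run
 * the handler above on its way back to userland, roughly:
 *
 *	// in the return-to-userspace path, e.g. tracehook_notify_resume()
 *	mem_cgroup_handle_over_high();
 */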

static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
		      unsigned int nr_pages)
{
	unsigned int batch = max(CHARGE_BATCH, nr_pages);
	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct mem_cgroup *mem_over_limit;
	struct page_counter *counter;
	unsigned long nr_reclaimed;
	bool may_swap = true;
	bool drained = false;

	if (mem_cgroup_is_root(memcg))
		return 0;
retry:
	if (consume_stock(memcg, nr_pages))
		return 0;

	if (!do_memsw_account() ||
	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
		if (page_counter_try_charge(&memcg->memory, batch, &counter))
			goto done_restock;
		if (do_memsw_account())
			page_counter_uncharge(&memcg->memsw, batch);
		mem_over_limit = mem_cgroup_from_counter(counter, memory);
	} else {
		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
		may_swap = false;
	}

	if (batch > nr_pages) {
		batch = nr_pages;
		goto retry;
	}

	/*
	 * Unlike in global OOM situations, memcg is not in a physical
	 * memory shortage.  Allow dying and OOM-killed tasks to
	 * bypass the last charges so that they can exit quickly and
	 * free their memory.
	 */
	if (unlikely(test_thread_flag(TIF_MEMDIE) ||
		     fatal_signal_pending(current) ||
		     current->flags & PF_EXITING))
		goto force;

	if (unlikely(task_in_memcg_oom(current)))
		goto nomem;

	if (!gfpflags_allow_blocking(gfp_mask))
		goto nomem;

	mem_cgroup_events(mem_over_limit, MEMCG_MAX, 1);

	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
						    gfp_mask, may_swap);

	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
		goto retry;

	if (!drained) {
		drain_all_stock(mem_over_limit);
		drained = true;
		goto retry;
	}

	if (gfp_mask & __GFP_NORETRY)
		goto nomem;
	/*
	 * Even though the limit is exceeded at this point, reclaim
	 * may have been able to free some pages.  Retry the charge
	 * before killing the task.
	 *
	 * Only for regular pages, though: huge pages are rather
	 * unlikely to succeed so close to the limit, and we fall back
	 * to regular pages anyway in case of failure.
	 */
	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
		goto retry;
	/*
	 * During task move, charge accounts can be doubly counted.  So it's
	 * better to wait until the end of the task move if one is going on.
	 */
	if (mem_cgroup_wait_acct_move(mem_over_limit))
		goto retry;

	if (nr_retries--)
		goto retry;

	if (gfp_mask & __GFP_NOFAIL)
		goto force;

	if (fatal_signal_pending(current))
		goto force;

	mem_cgroup_events(mem_over_limit, MEMCG_OOM, 1);

	mem_cgroup_oom(mem_over_limit, gfp_mask,
		       get_order(nr_pages * PAGE_SIZE));
nomem:
	if (!(gfp_mask & __GFP_NOFAIL))
		return -ENOMEM;
force:
	/*
	 * The allocation either can't fail or will lead to more memory
	 * being freed very soon.  Allow memory usage to go over the limit
	 * temporarily by force charging it.
	 */
	page_counter_charge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_charge(&memcg->memsw, nr_pages);
	css_get_many(&memcg->css, nr_pages);

	return 0;

done_restock:
	css_get_many(&memcg->css, batch);
	if (batch > nr_pages)
		refill_stock(memcg, batch - nr_pages);

	/*
	 * If the hierarchy is above the normal consumption range, schedule
	 * reclaim on returning to userland.  We can perform reclaim here
	 * if __GFP_RECLAIM but let's always punt for simplicity and so that
	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
	 * not recorded as it most likely matches current's and won't
	 * change in the meantime.  As high limit is checked again before
	 * reclaim, the cost of mismatch is negligible.
	 */
	do {
		if (page_counter_read(&memcg->memory) > memcg->high) {
			/* Don't bother a random interrupted task */
			if (in_interrupt()) {
				schedule_work(&memcg->high_work);
				break;
			}
			current->memcg_nr_pages_over_high += batch;
			set_notify_resume(current);
			break;
		}
	} while ((memcg = parent_mem_cgroup(memcg)));

	return 0;
}

static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	if (mem_cgroup_is_root(memcg))
		return;

	page_counter_uncharge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_uncharge(&memcg->memsw, nr_pages);

	css_put_many(&memcg->css, nr_pages);
}

static void lock_page_lru(struct page *page, int *isolated)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(zone_lru_lock(zone));
	if (PageLRU(page)) {
		struct lruvec *lruvec;

		lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
		ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_lru(page));
		*isolated = 1;
	} else
		*isolated = 0;
}

static void unlock_page_lru(struct page *page, int isolated)
{
	struct zone *zone = page_zone(page);

	if (isolated) {
		struct lruvec *lruvec;

		lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
		VM_BUG_ON_PAGE(PageLRU(page), page);
		SetPageLRU(page);
		add_page_to_lru_list(page, lruvec, page_lru(page));
	}
	spin_unlock_irq(zone_lru_lock(zone));
}

static void commit_charge(struct page *page, struct mem_cgroup *memcg,
			  bool lrucare)
{
	int isolated;

	VM_BUG_ON_PAGE(page->mem_cgroup, page);

	/*
	 * In some cases (swap cache, FUSE's splice_buf->radixtree), the
	 * page may already be on some other mem_cgroup's LRU.  Take care
	 * of it.
	 */
	if (lrucare)
		lock_page_lru(page, &isolated);

	/*
	 * Nobody should be changing or seriously looking at
	 * page->mem_cgroup at this point:
	 *
	 * - the page is uncharged
	 *
	 * - the page is off-LRU
	 *
	 * - an anonymous fault has exclusive page access, except for
	 *   a locked page table
	 *
	 * - a page cache insertion, a swapin fault, or a migration
	 *   have the page locked
	 */
	page->mem_cgroup = memcg;

	if (lrucare)
		unlock_page_lru(page, isolated);
}
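
/*
 * Charge lifecycle sketch (illustrative; the exported wrappers live further
 * down in this file): a typical charging site pairs try_charge() with
 * commit_charge() via mem_cgroup_try_charge()/mem_cgroup_commit_charge(),
 * and undoes a speculative charge with mem_cgroup_cancel_charge().  The
 * parameter list below is an approximation:
 *
 *	if (mem_cgroup_try_charge(page, mm, gfp_mask, &memcg, false))
 *		return -ENOMEM;
 *	...insert the page into the page tables or page cache...
 *	if (ok)
 *		mem_cgroup_commit_charge(page, memcg, false, false);
 *	else
 *		mem_cgroup_cancel_charge(page, memcg, false);
 */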

#ifndef CONFIG_SLOB
static int memcg_alloc_cache_id(void)
{
	int id, size;
	int err;

	id = ida_simple_get(&memcg_cache_ida,
			    0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
	if (id < 0)
		return id;

	if (id < memcg_nr_cache_ids)
		return id;

	/*
	 * There's no space for the new id in memcg_caches arrays,
	 * so we have to grow them.
	 */
	down_write(&memcg_cache_ids_sem);

	size = 2 * (id + 1);
	if (size < MEMCG_CACHES_MIN_SIZE)
		size = MEMCG_CACHES_MIN_SIZE;
	else if (size > MEMCG_CACHES_MAX_SIZE)
		size = MEMCG_CACHES_MAX_SIZE;

	err = memcg_update_all_caches(size);
	if (!err)
		err = memcg_update_all_list_lrus(size);
	if (!err)
		memcg_nr_cache_ids = size;

	up_write(&memcg_cache_ids_sem);

	if (err) {
		ida_simple_remove(&memcg_cache_ida, id);
		return err;
	}
	return id;
}

static void memcg_free_cache_id(int id)
{
	ida_simple_remove(&memcg_cache_ida, id);
}

struct memcg_kmem_cache_create_work {
	struct mem_cgroup *memcg;
	struct kmem_cache *cachep;
	struct work_struct work;
};

static void memcg_kmem_cache_create_func(struct work_struct *w)
{
	struct memcg_kmem_cache_create_work *cw =
		container_of(w, struct memcg_kmem_cache_create_work, work);
	struct mem_cgroup *memcg = cw->memcg;
	struct kmem_cache *cachep = cw->cachep;

	memcg_create_kmem_cache(memcg, cachep);

	css_put(&memcg->css);
	kfree(cw);
}

/*
 * Enqueue the creation of a per-memcg kmem_cache.
 */
static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
					       struct kmem_cache *cachep)
{
	struct memcg_kmem_cache_create_work *cw;

	cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
	if (!cw)
		return;

	css_get(&memcg->css);

	cw->memcg = memcg;
	cw->cachep = cachep;
	INIT_WORK(&cw->work, memcg_kmem_cache_create_func);

	schedule_work(&cw->work);
}

static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
					     struct kmem_cache *cachep)
{
	/*
	 * We need to stop accounting when we kmalloc, because if the
	 * corresponding kmalloc cache is not yet created, the first allocation
	 * in __memcg_schedule_kmem_cache_create will recurse.
	 *
	 * However, it is better to enclose the whole function. Depending on
	 * the debugging options enabled, INIT_WORK(), for instance, can
	 * trigger an allocation. This too, will make us recurse. Because at
	 * this point we can't allow ourselves back into memcg_kmem_get_cache,
	 * the safest choice is to do it like this, wrapping the whole function.
	 */
	current->memcg_kmem_skip_account = 1;
	__memcg_schedule_kmem_cache_create(memcg, cachep);
	current->memcg_kmem_skip_account = 0;
}

static inline bool memcg_kmem_bypass(void)
{
	if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
		return true;
	return false;
}

/**
 * memcg_kmem_get_cache: select the correct per-memcg cache for allocation
 * @cachep: the original global kmem cache
 *
 * Return the kmem_cache we're supposed to use for a slab allocation.
 * We try to use the current memcg's version of the cache.
 *
 * If the cache does not exist yet, and we are the first user of it, we
 * create it asynchronously in a workqueue and let the current allocation
 * go through with the original cache.
 *
 * This function takes a reference to the cache it returns to assure it
 * won't get destroyed while we are working with it. Once the caller is
 * done with it, memcg_kmem_put_cache() must be called to release the
 * reference.
 */
struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
{
	struct mem_cgroup *memcg;
	struct kmem_cache *memcg_cachep;
	int kmemcg_id;

	VM_BUG_ON(!is_root_cache(cachep));

	if (memcg_kmem_bypass())
		return cachep;

	if (current->memcg_kmem_skip_account)
		return cachep;

	memcg = get_mem_cgroup_from_mm(current->mm);
	kmemcg_id = READ_ONCE(memcg->kmemcg_id);
	if (kmemcg_id < 0)
		goto out;

	memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
	if (likely(memcg_cachep))
		return memcg_cachep;

	/*
	 * If we are in a safe context (can wait, and not in interrupt
	 * context), we could be predictable and return right away.
	 * This would guarantee that the allocation being performed
	 * already belongs in the new cache.
	 *
	 * However, there are some clashes that can arise from locking.
	 * For instance, because we acquire the slab_mutex while doing
	 * memcg_create_kmem_cache, this means no further allocation
	 * could happen with the slab_mutex held. So it's better to
	 * defer everything.
	 */
	memcg_schedule_kmem_cache_create(memcg, cachep);
out:
	css_put(&memcg->css);
	return cachep;
}

/**
 * memcg_kmem_put_cache: drop reference taken by memcg_kmem_get_cache
 * @cachep: the cache returned by memcg_kmem_get_cache
 */
void memcg_kmem_put_cache(struct kmem_cache *cachep)
{
	if (!is_root_cache(cachep))
		css_put(&cachep->memcg_params.memcg->css);
}
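
/*
 * Usage sketch (not the actual slab hooks, which live in mm/slab.h): an
 * accounted slab allocation is expected to wrap the allocation in the pair
 * above, roughly:
 *
 *	if (memcg_kmem_enabled() && (gfpflags & __GFP_ACCOUNT))
 *		s = memcg_kmem_get_cache(s);
 *	...allocate the object from s...
 *	memcg_kmem_put_cache(s);
 */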

/**
 * memcg_kmem_charge_memcg: charge a kmem page to the given memory cgroup
 * @page: page to charge
 * @gfp: reclaim mode
 * @order: allocation order
 * @memcg: memory cgroup to charge
 *
 * Returns 0 on success, an error code on failure.
 */
int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
			    struct mem_cgroup *memcg)
{
	unsigned int nr_pages = 1 << order;
	struct page_counter *counter;
	int ret;

	ret = try_charge(memcg, gfp, nr_pages);
	if (ret)
		return ret;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
	    !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
		cancel_charge(memcg, nr_pages);
		return -ENOMEM;
	}

	page->mem_cgroup = memcg;

	return 0;
}

/**
 * memcg_kmem_charge: charge a kmem page to the current memory cgroup
 * @page: page to charge
 * @gfp: reclaim mode
 * @order: allocation order
 *
 * Returns 0 on success, an error code on failure.
 */
int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
{
	struct mem_cgroup *memcg;
	int ret = 0;

	if (memcg_kmem_bypass())
		return 0;

	memcg = get_mem_cgroup_from_mm(current->mm);
	if (!mem_cgroup_is_root(memcg))
		ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
	css_put(&memcg->css);
	return ret;
}

/**
 * memcg_kmem_uncharge: uncharge a kmem page
 * @page: page to uncharge
 * @order: allocation order
 */
void memcg_kmem_uncharge(struct page *page, int order)
{
	struct mem_cgroup *memcg = page->mem_cgroup;
	unsigned int nr_pages = 1 << order;

	if (!memcg)
		return;

	VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		page_counter_uncharge(&memcg->kmem, nr_pages);

	page_counter_uncharge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_uncharge(&memcg->memsw, nr_pages);

	page->mem_cgroup = NULL;
	css_put_many(&memcg->css, nr_pages);
}
#endif /* !CONFIG_SLOB */
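
/*
 * Page-level kmem accounting sketch (illustrative only): the page allocator
 * is expected to charge __GFP_ACCOUNT allocations with memcg_kmem_charge()
 * once the page is allocated and to call memcg_kmem_uncharge() when it is
 * freed, roughly:
 *
 *	page = alloc_pages(gfp, order);
 *	if (page && memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) &&
 *	    memcg_kmem_charge(page, gfp, order)) {
 *		__free_pages(page, order);
 *		page = NULL;
 *	}
 */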

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * Because the tail pages are not charged separately, propagate the head
 * page's mem_cgroup to them.  We're under zone_lru_lock and migration
 * entries are set up in all page mappings.
 */
void mem_cgroup_split_huge_fixup(struct page *head)
{
	int i;

	if (mem_cgroup_disabled())
		return;

	for (i = 1; i < HPAGE_PMD_NR; i++)
		head[i].mem_cgroup = head->mem_cgroup;

	__this_cpu_sub(head->mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
		       HPAGE_PMD_NR);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_MEMCG_SWAP
static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
					 bool charge)
{
	int val = (charge) ? 1 : -1;
	this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
}

/**
 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
 * @entry: swap entry to be moved
 * @from:  mem_cgroup which the entry is moved from
 * @to:  mem_cgroup which the entry is moved to
 *
 * It succeeds only when the swap_cgroup's record for this entry is the same
 * as the mem_cgroup's id of @from.
 *
 * Returns 0 on success, -EINVAL on failure.
 *
 * The caller must have charged @to, IOW, called page_counter_charge() on
 * both memory and memsw, and called css_get().
 */
static int mem_cgroup_move_swap_account(swp_entry_t entry,
				struct mem_cgroup *from, struct mem_cgroup *to)
{
	unsigned short old_id, new_id;

	old_id = mem_cgroup_id(from);
	new_id = mem_cgroup_id(to);

	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
		mem_cgroup_swap_statistics(from, false);
		mem_cgroup_swap_statistics(to, true);
		return 0;
	}
	return -EINVAL;
}
#else
static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
				struct mem_cgroup *from, struct mem_cgroup *to)
{
	return -EINVAL;
}
#endif

static DEFINE_MUTEX(memcg_limit_mutex);

static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
				   unsigned long limit)
{
	unsigned long curusage;
	unsigned long oldusage;
	bool enlarge = false;
	int retry_count;
	int ret;

	/*
	 * To keep hierarchical reclaim simple, how long we should retry
	 * depends on the caller.  We set the retry count to be a function
	 * of the number of children we should visit in this loop.
	 */
	retry_count = MEM_CGROUP_RECLAIM_RETRIES *
		      mem_cgroup_count_children(memcg);

	oldusage = page_counter_read(&memcg->memory);

	do {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		mutex_lock(&memcg_limit_mutex);
		if (limit > memcg->memsw.limit) {
			mutex_unlock(&memcg_limit_mutex);
			ret = -EINVAL;
			break;
		}
		if (limit > memcg->memory.limit)
			enlarge = true;
		ret = page_counter_limit(&memcg->memory, limit);
		mutex_unlock(&memcg_limit_mutex);

		if (!ret)
			break;

		try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true);

		curusage = page_counter_read(&memcg->memory);
		/* Usage is reduced ? */
		if (curusage >= oldusage)
			retry_count--;
		else
			oldusage = curusage;
	} while (retry_count);

	if (!ret && enlarge)
		memcg_oom_recover(memcg);

	return ret;
}

static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
					 unsigned long limit)
{
	unsigned long curusage;
	unsigned long oldusage;
	bool enlarge = false;
	int retry_count;
	int ret;

	/* see mem_cgroup_resize_limit */
	retry_count = MEM_CGROUP_RECLAIM_RETRIES *
		      mem_cgroup_count_children(memcg);

	oldusage = page_counter_read(&memcg->memsw);

	do {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		mutex_lock(&memcg_limit_mutex);
		if (limit < memcg->memory.limit) {
			mutex_unlock(&memcg_limit_mutex);
			ret = -EINVAL;
			break;
		}
		if (limit > memcg->memsw.limit)
			enlarge = true;
		ret = page_counter_limit(&memcg->memsw, limit);
		mutex_unlock(&memcg_limit_mutex);

		if (!ret)
			break;

		try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false);

		curusage = page_counter_read(&memcg->memsw);
		/* Usage is reduced ? */
		if (curusage >= oldusage)
			retry_count--;
		else
			oldusage = curusage;
	} while (retry_count);

	if (!ret && enlarge)
		memcg_oom_recover(memcg);

	return ret;
}

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	unsigned long nr_reclaimed = 0;
	struct mem_cgroup_per_node *mz, *next_mz = NULL;
	unsigned long reclaimed;
	int loop = 0;
	struct mem_cgroup_tree_per_node *mctz;
	unsigned long excess;
	unsigned long nr_scanned;

	if (order > 0)
		return 0;

	mctz = soft_limit_tree_node(pgdat->node_id);
	/*
	 * This loop can run a while, especially if mem_cgroups continuously
	 * keep exceeding their soft limit and putting the system under
	 * pressure.
	 */
	do {
		if (next_mz)
			mz = next_mz;
		else
			mz = mem_cgroup_largest_soft_limit_node(mctz);
		if (!mz)
			break;

		nr_scanned = 0;
		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
						    gfp_mask, &nr_scanned);
		nr_reclaimed += reclaimed;
		*total_scanned += nr_scanned;
		spin_lock_irq(&mctz->lock);
		__mem_cgroup_remove_exceeded(mz, mctz);

		/*
		 * If we failed to reclaim anything from this memory cgroup
		 * it is time to move on to the next cgroup
		 */
		next_mz = NULL;
		if (!reclaimed)
			next_mz = __mem_cgroup_largest_soft_limit_node(mctz);

		excess = soft_limit_excess(mz->memcg);
		/*
		 * One school of thought says that we should not add
		 * back the node to the tree if reclaim returns 0.
		 * But our reclaim could return 0, simply because due
		 * to priority we are exposing a smaller subset of
		 * memory to reclaim from. Consider this as a longer
		 * term TODO.
		 */
		/* If excess == 0, no tree ops */
		__mem_cgroup_insert_exceeded(mz, mctz, excess);
		spin_unlock_irq(&mctz->lock);
		css_put(&mz->memcg->css);
		loop++;
		/*
		 * Could not reclaim anything and there are no more
		 * mem cgroups to try or we seem to be looping without
		 * reclaiming anything.
		 */
		if (!nr_reclaimed &&
			(next_mz == NULL ||
			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
			break;
	} while (!nr_reclaimed);
	if (next_mz)
		css_put(&next_mz->memcg->css);
	return nr_reclaimed;
}

/*
 * Test whether @memcg has children, dead or alive.  Note that this
 * function doesn't care whether @memcg has use_hierarchy enabled and
 * returns %true if there are child csses according to the cgroup
 * hierarchy.  Testing use_hierarchy is the caller's responsibility.
 */
static inline bool memcg_has_children(struct mem_cgroup *memcg)
{
	bool ret;

	rcu_read_lock();
	ret = css_next_child(NULL, &memcg->css);
	rcu_read_unlock();
	return ret;
}

/*
 * Reclaims as many pages from the given memcg as possible.
 *
 * Caller is responsible for holding css reference for memcg.
 */
static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
{
	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;

	/* we call try-to-free pages to make this cgroup empty */
	lru_add_drain_all();
	/* try to free all pages in this cgroup */
	while (nr_retries && page_counter_read(&memcg->memory)) {
		int progress;

		if (signal_pending(current))
			return -EINTR;

		progress = try_to_free_mem_cgroup_pages(memcg, 1,
							GFP_KERNEL, true);
		if (!progress) {
			nr_retries--;
			/* maybe some writeback is necessary */
			congestion_wait(BLK_RW_ASYNC, HZ/10);
		}

	}

	return 0;
}

static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
					    char *buf, size_t nbytes,
					    loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));

	if (mem_cgroup_is_root(memcg))
		return -EINVAL;
	return mem_cgroup_force_empty(memcg) ?: nbytes;
}

static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
				     struct cftype *cft)
{
	return mem_cgroup_from_css(css)->use_hierarchy;
}

static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
				      struct cftype *cft, u64 val)
{
	int retval = 0;
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);

	if (memcg->use_hierarchy == val)
		return 0;

	/*
	 * If parent's use_hierarchy is set, we can't make any modifications
	 * in the child subtrees. If it is unset, then the change can
	 * occur, provided the current cgroup has no children.
	 *
	 * For the root cgroup, parent_memcg is NULL, and we allow the value
	 * to be set if there are no children.
	 */
	if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
				(val == 1 || val == 0)) {
		if (!memcg_has_children(memcg))
			memcg->use_hierarchy = val;
		else
			retval = -EBUSY;
	} else
		retval = -EINVAL;

	return retval;
}

static void tree_stat(struct mem_cgroup *memcg, unsigned long *stat)
{
	struct mem_cgroup *iter;
	int i;

	memset(stat, 0, sizeof(*stat) * MEMCG_NR_STAT);

	for_each_mem_cgroup_tree(iter, memcg) {
		for (i = 0; i < MEMCG_NR_STAT; i++)
			stat[i] += mem_cgroup_read_stat(iter, i);
	}
}

static void tree_events(struct mem_cgroup *memcg, unsigned long *events)
{
	struct mem_cgroup *iter;
	int i;

	memset(events, 0, sizeof(*events) * MEMCG_NR_EVENTS);

	for_each_mem_cgroup_tree(iter, memcg) {
		for (i = 0; i < MEMCG_NR_EVENTS; i++)
			events[i] += mem_cgroup_read_events(iter, i);
	}
}

static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
{
	unsigned long val = 0;

	if (mem_cgroup_is_root(memcg)) {
		struct mem_cgroup *iter;

		for_each_mem_cgroup_tree(iter, memcg) {
			val += mem_cgroup_read_stat(iter,
					MEM_CGROUP_STAT_CACHE);
			val += mem_cgroup_read_stat(iter,
					MEM_CGROUP_STAT_RSS);
			if (swap)
				val += mem_cgroup_read_stat(iter,
						MEM_CGROUP_STAT_SWAP);
		}
	} else {
		if (!swap)
			val = page_counter_read(&memcg->memory);
		else
			val = page_counter_read(&memcg->memsw);
	}
	return val;
}

enum {
	RES_USAGE,
	RES_LIMIT,
	RES_MAX_USAGE,
	RES_FAILCNT,
	RES_SOFT_LIMIT,
};

static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct page_counter *counter;

	switch (MEMFILE_TYPE(cft->private)) {
	case _MEM:
		counter = &memcg->memory;
		break;
	case _MEMSWAP:
		counter = &memcg->memsw;
		break;
	case _KMEM:
		counter = &memcg->kmem;
		break;
	case _TCP:
		counter = &memcg->tcpmem;
		break;
	default:
		BUG();
	}

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_USAGE:
		if (counter == &memcg->memory)
			return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
		if (counter == &memcg->memsw)
			return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
		return (u64)page_counter_read(counter) * PAGE_SIZE;
	case RES_LIMIT:
		return (u64)counter->limit * PAGE_SIZE;
	case RES_MAX_USAGE:
		return (u64)counter->watermark * PAGE_SIZE;
	case RES_FAILCNT:
		return counter->failcnt;
	case RES_SOFT_LIMIT:
		return (u64)memcg->soft_limit * PAGE_SIZE;
	default:
		BUG();
	}
}

#ifndef CONFIG_SLOB
static int memcg_online_kmem(struct mem_cgroup *memcg)
{
	int memcg_id;

	if (cgroup_memory_nokmem)
		return 0;

	BUG_ON(memcg->kmemcg_id >= 0);
	BUG_ON(memcg->kmem_state);

	memcg_id = memcg_alloc_cache_id();
	if (memcg_id < 0)
		return memcg_id;

	static_branch_inc(&memcg_kmem_enabled_key);
	/*
	 * A memory cgroup is considered kmem-online as soon as it gets
	 * kmemcg_id. Setting the id after enabling static branching will
	 * guarantee no one starts accounting before all call sites are
	 * patched.
	 */
	memcg->kmemcg_id = memcg_id;
	memcg->kmem_state = KMEM_ONLINE;

	return 0;
}

static void memcg_offline_kmem(struct mem_cgroup *memcg)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *parent, *child;
	int kmemcg_id;

	if (memcg->kmem_state != KMEM_ONLINE)
		return;
	/*
	 * Clear the online state before clearing memcg_caches array
	 * entries. The slab_mutex in memcg_deactivate_kmem_caches()
	 * guarantees that no cache will be created for this cgroup
	 * after we are done (see memcg_create_kmem_cache()).
	 */
	memcg->kmem_state = KMEM_ALLOCATED;

	memcg_deactivate_kmem_caches(memcg);

	kmemcg_id = memcg->kmemcg_id;
	BUG_ON(kmemcg_id < 0);

	parent = parent_mem_cgroup(memcg);
	if (!parent)
		parent = root_mem_cgroup;

	/*
	 * Change kmemcg_id of this cgroup and all its descendants to the
	 * parent's id, and then move all entries from this cgroup's list_lrus
	 * to ones of the parent. After we have finished, all list_lrus
	 * corresponding to this cgroup are guaranteed to remain empty. The
	 * ordering is imposed by list_lru_node->lock taken by
	 * memcg_drain_all_list_lrus().
	 */
	rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
	css_for_each_descendant_pre(css, &memcg->css) {
		child = mem_cgroup_from_css(css);
		BUG_ON(child->kmemcg_id != kmemcg_id);
		child->kmemcg_id = parent->kmemcg_id;
		if (!memcg->use_hierarchy)
			break;
	}
	rcu_read_unlock();

	memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);

	memcg_free_cache_id(kmemcg_id);
}

static void memcg_free_kmem(struct mem_cgroup *memcg)
{
	/* css_alloc() failed, offlining didn't happen */
	if (unlikely(memcg->kmem_state == KMEM_ONLINE))
		memcg_offline_kmem(memcg);

	if (memcg->kmem_state == KMEM_ALLOCATED) {
		memcg_destroy_kmem_caches(memcg);
		static_branch_dec(&memcg_kmem_enabled_key);
		WARN_ON(page_counter_read(&memcg->kmem));
	}
}
#else
static int memcg_online_kmem(struct mem_cgroup *memcg)
{
	return 0;
}
static void memcg_offline_kmem(struct mem_cgroup *memcg)
{
}
static void memcg_free_kmem(struct mem_cgroup *memcg)
{
}
#endif /* !CONFIG_SLOB */

static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
				   unsigned long limit)
{
	int ret;

	mutex_lock(&memcg_limit_mutex);
	ret = page_counter_limit(&memcg->kmem, limit);
	mutex_unlock(&memcg_limit_mutex);
	return ret;
}

static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
{
	int ret;

	mutex_lock(&memcg_limit_mutex);

	ret = page_counter_limit(&memcg->tcpmem, limit);
	if (ret)
		goto out;

	if (!memcg->tcpmem_active) {
		/*
		 * The active flag needs to be written after the static_key
		 * update. This is what guarantees that the socket activation
		 * function is the last one to run. See sock_update_memcg() for
		 * details, and note that we don't mark any socket as belonging
		 * to this memcg until that flag is up.
		 *
		 * We need to do this, because static_keys will span multiple
		 * sites, but we can't control their order. If we mark a socket
		 * as accounted, but the accounting functions are not patched in
		 * yet, we'll lose accounting.
		 *
		 * We never race with the readers in sock_update_memcg(),
		 * because when this value changes, the code to process it is
		 * not patched in yet.
		 */
		static_branch_inc(&memcg_sockets_enabled_key);
		memcg->tcpmem_active = true;
	}
out:
	mutex_unlock(&memcg_limit_mutex);
	return ret;
}

/*
 * The main user of this function is the write handler for the limit
 * files (RES_LIMIT); it also handles RES_SOFT_LIMIT.
 */
static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long nr_pages;
	int ret;

	buf = strstrip(buf);
	ret = page_counter_memparse(buf, "-1", &nr_pages);
	if (ret)
		return ret;

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_LIMIT:
		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
			ret = -EINVAL;
			break;
		}
		switch (MEMFILE_TYPE(of_cft(of)->private)) {
		case _MEM:
			ret = mem_cgroup_resize_limit(memcg, nr_pages);
			break;
		case _MEMSWAP:
			ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages);
			break;
		case _KMEM:
			ret = memcg_update_kmem_limit(memcg, nr_pages);
			break;
		case _TCP:
			ret = memcg_update_tcp_limit(memcg, nr_pages);
			break;
		}
		break;
	case RES_SOFT_LIMIT:
		memcg->soft_limit = nr_pages;
		ret = 0;
		break;
	}
	return ret ?: nbytes;
}

static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
				size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	struct page_counter *counter;

	switch (MEMFILE_TYPE(of_cft(of)->private)) {
	case _MEM:
		counter = &memcg->memory;
		break;
	case _MEMSWAP:
		counter = &memcg->memsw;
		break;
	case _KMEM:
		counter = &memcg->kmem;
		break;
	case _TCP:
		counter = &memcg->tcpmem;
		break;
	default:
		BUG();
	}

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_MAX_USAGE:
		page_counter_reset_watermark(counter);
		break;
	case RES_FAILCNT:
		counter->failcnt = 0;
		break;
	default:
		BUG();
	}

	return nbytes;
}

static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
					struct cftype *cft)
{
	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
}

#ifdef CONFIG_MMU
static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
					struct cftype *cft, u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	if (val & ~MOVE_MASK)
		return -EINVAL;

	/*
	 * No kind of locking is needed in here, because ->can_attach() will
	 * check this value once at the beginning of the process, and then
	 * carry on with stale data. This means that changes to this value
	 * will only affect task migrations starting after the change.
	 */
	memcg->move_charge_at_immigrate = val;
	return 0;
}
#else
static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
					struct cftype *cft, u64 val)
{
	return -ENOSYS;
}
#endif

#ifdef CONFIG_NUMA
static int memcg_numa_stat_show(struct seq_file *m, void *v)
{
	struct numa_stat {
		const char *name;
		unsigned int lru_mask;
	};

	static const struct numa_stat stats[] = {
		{ "total", LRU_ALL },
		{ "file", LRU_ALL_FILE },
		{ "anon", LRU_ALL_ANON },
		{ "unevictable", BIT(LRU_UNEVICTABLE) },
	};
	const struct numa_stat *stat;
	int nid;
	unsigned long nr;
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));

	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
		nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
		seq_printf(m, "%s=%lu", stat->name, nr);
		for_each_node_state(nid, N_MEMORY) {
			nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
							  stat->lru_mask);
			seq_printf(m, " N%d=%lu", nid, nr);
		}
		seq_putc(m, '\n');
	}

	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
		struct mem_cgroup *iter;

		nr = 0;
		for_each_mem_cgroup_tree(iter, memcg)
			nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
		seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
		for_each_node_state(nid, N_MEMORY) {
			nr = 0;
			for_each_mem_cgroup_tree(iter, memcg)
				nr += mem_cgroup_node_nr_lru_pages(
					iter, nid, stat->lru_mask);
			seq_printf(m, " N%d=%lu", nid, nr);
		}
		seq_putc(m, '\n');
	}

	return 0;
}
#endif /* CONFIG_NUMA */

static int memcg_stat_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
	unsigned long memory, memsw;
	struct mem_cgroup *mi;
	unsigned int i;

	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_stat_names) !=
		     MEM_CGROUP_STAT_NSTATS);
	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_events_names) !=
		     MEM_CGROUP_EVENTS_NSTATS);
	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);

	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
		if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
			continue;
		seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i],
			   mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
	}

	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
		seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
			   mem_cgroup_read_events(memcg, i));

	for (i = 0; i < NR_LRU_LISTS; i++)
		seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
			   mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);

	/* Hierarchical information */
	memory = memsw = PAGE_COUNTER_MAX;
	for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
		memory = min(memory, mi->memory.limit);
		memsw = min(memsw, mi->memsw.limit);
	}
	seq_printf(m, "hierarchical_memory_limit %llu\n",
		   (u64)memory * PAGE_SIZE);
	if (do_memsw_account())
		seq_printf(m, "hierarchical_memsw_limit %llu\n",
			   (u64)memsw * PAGE_SIZE);

	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
		unsigned long long val = 0;

		if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
			continue;
		for_each_mem_cgroup_tree(mi, memcg)
			val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
		seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val);
	}

	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
		unsigned long long val = 0;

		for_each_mem_cgroup_tree(mi, memcg)
			val += mem_cgroup_read_events(mi, i);
		seq_printf(m, "total_%s %llu\n",
			   mem_cgroup_events_names[i], val);
	}

	for (i = 0; i < NR_LRU_LISTS; i++) {
		unsigned long long val = 0;

		for_each_mem_cgroup_tree(mi, memcg)
			val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
		seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
	}

#ifdef CONFIG_DEBUG_VM
	{
		pg_data_t *pgdat;
		struct mem_cgroup_per_node *mz;
		struct zone_reclaim_stat *rstat;
		unsigned long recent_rotated[2] = {0, 0};
		unsigned long recent_scanned[2] = {0, 0};

		for_each_online_pgdat(pgdat) {
			mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
			rstat = &mz->lruvec.reclaim_stat;

			recent_rotated[0] += rstat->recent_rotated[0];
			recent_rotated[1] += rstat->recent_rotated[1];
			recent_scanned[0] += rstat->recent_scanned[0];
			recent_scanned[1] += rstat->recent_scanned[1];
		}
		seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
		seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
		seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
		seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
	}
#endif

	return 0;
}

static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	return mem_cgroup_swappiness(memcg);
}

static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	if (val > 100)
		return -EINVAL;

	if (css->parent)
		memcg->swappiness = val;
	else
		vm_swappiness = val;

	return 0;
}

static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
{
	struct mem_cgroup_threshold_ary *t;
	unsigned long usage;
	int i;

	rcu_read_lock();
	if (!swap)
		t = rcu_dereference(memcg->thresholds.primary);
	else
		t = rcu_dereference(memcg->memsw_thresholds.primary);

	if (!t)
		goto unlock;

	usage = mem_cgroup_usage(memcg, swap);

	/*
	 * current_threshold points to the threshold just below or equal to
	 * usage.  If it doesn't, a threshold was crossed after the last
	 * call of __mem_cgroup_threshold().
	 */
	i = t->current_threshold;

	/*
	 * Iterate backward over array of thresholds starting from
	 * current_threshold and check if a threshold is crossed.
	 * If none of thresholds below usage is crossed, we read
	 * only one element of the array here.
	 */
	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
		eventfd_signal(t->entries[i].eventfd, 1);

	/* i = current_threshold + 1 */
	i++;

	/*
	 * Iterate forward over array of thresholds starting from
	 * current_threshold+1 and check if a threshold is crossed.
	 * If none of thresholds above usage is crossed, we read
	 * only one element of the array here.
	 */
	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
		eventfd_signal(t->entries[i].eventfd, 1);

	/* Update current_threshold */
	t->current_threshold = i - 1;
unlock:
	rcu_read_unlock();
}

static void mem_cgroup_threshold(struct mem_cgroup *memcg)
{
	while (memcg) {
		__mem_cgroup_threshold(memcg, false);
		if (do_memsw_account())
			__mem_cgroup_threshold(memcg, true);

		memcg = parent_mem_cgroup(memcg);
	}
}

static int compare_thresholds(const void *a, const void *b)
{
	const struct mem_cgroup_threshold *_a = a;
	const struct mem_cgroup_threshold *_b = b;

	if (_a->threshold > _b->threshold)
		return 1;

	if (_a->threshold < _b->threshold)
		return -1;

	return 0;
}

static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
{
	struct mem_cgroup_eventfd_list *ev;

	spin_lock(&memcg_oom_lock);

	list_for_each_entry(ev, &memcg->oom_notify, list)
		eventfd_signal(ev->eventfd, 1);

	spin_unlock(&memcg_oom_lock);
	return 0;
}

static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg)
		mem_cgroup_oom_notify_cb(iter);
}

static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, const char *args, enum res_type type)
{
	struct mem_cgroup_thresholds *thresholds;
	struct mem_cgroup_threshold_ary *new;
	unsigned long threshold;
	unsigned long usage;
	int i, size, ret;

	ret = page_counter_memparse(args, "-1", &threshold);
	if (ret)
		return ret;

	mutex_lock(&memcg->thresholds_lock);

	if (type == _MEM) {
		thresholds = &memcg->thresholds;
		usage = mem_cgroup_usage(memcg, false);
	} else if (type == _MEMSWAP) {
		thresholds = &memcg->memsw_thresholds;
		usage = mem_cgroup_usage(memcg, true);
	} else
		BUG();

	/* Check if a threshold crossed before adding a new one */
	if (thresholds->primary)
		__mem_cgroup_threshold(memcg, type == _MEMSWAP);

	size = thresholds->primary ? thresholds->primary->size + 1 : 1;

	/* Allocate memory for new array of thresholds */
	new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
			GFP_KERNEL);
	if (!new) {
		ret = -ENOMEM;
		goto unlock;
	}
	new->size = size;

	/* Copy thresholds (if any) to new array */
	if (thresholds->primary) {
		memcpy(new->entries, thresholds->primary->entries, (size - 1) *
				sizeof(struct mem_cgroup_threshold));
	}

	/* Add new threshold */
	new->entries[size - 1].eventfd = eventfd;
	new->entries[size - 1].threshold = threshold;

	/* Sort thresholds. Registering of new threshold isn't time-critical */
	sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
			compare_thresholds, NULL);

	/* Find current threshold */
	new->current_threshold = -1;
	for (i = 0; i < size; i++) {
		if (new->entries[i].threshold <= usage) {
			/*
			 * new->current_threshold will not be used until
			 * rcu_assign_pointer(), so it's safe to increment
			 * it here.
			 */
			++new->current_threshold;
		} else
			break;
	}

	/* Free old spare buffer and save old primary buffer as spare */
	kfree(thresholds->spare);
	thresholds->spare = thresholds->primary;

	rcu_assign_pointer(thresholds->primary, new);

	/* To be sure that nobody uses thresholds */
	synchronize_rcu();

unlock:
	mutex_unlock(&memcg->thresholds_lock);

	return ret;
}

static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, const char *args)
{
	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
}

static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, const char *args)
{
	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
}

static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, enum res_type type)
{
	struct mem_cgroup_thresholds *thresholds;
	struct mem_cgroup_threshold_ary *new;
	unsigned long usage;
	int i, j, size;

	mutex_lock(&memcg->thresholds_lock);

	if (type == _MEM) {
		thresholds = &memcg->thresholds;
		usage = mem_cgroup_usage(memcg, false);
	} else if (type == _MEMSWAP) {
		thresholds = &memcg->memsw_thresholds;
		usage = mem_cgroup_usage(memcg, true);
	} else
		BUG();

	if (!thresholds->primary)
		goto unlock;

	/* Check if a threshold crossed before removing */
	__mem_cgroup_threshold(memcg, type == _MEMSWAP);

	/* Calculate new number of thresholds */
	size = 0;
	for (i = 0; i < thresholds->primary->size; i++) {
		if (thresholds->primary->entries[i].eventfd != eventfd)
			size++;
	}

	new = thresholds->spare;

	/* Set thresholds array to NULL if we don't have thresholds */
	if (!size) {
		kfree(new);
		new = NULL;
		goto swap_buffers;
	}

	new->size = size;

	/* Copy thresholds and find current threshold */
	new->current_threshold = -1;
	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
		if (thresholds->primary->entries[i].eventfd == eventfd)
			continue;

		new->entries[j] = thresholds->primary->entries[i];
		if (new->entries[j].threshold <= usage) {
			/*
			 * new->current_threshold will not be used
			 * until rcu_assign_pointer(), so it's safe to increment
			 * it here.
			 */
			++new->current_threshold;
		}
		j++;
	}

swap_buffers:
	/* Swap primary and spare array */
	thresholds->spare = thresholds->primary;

	rcu_assign_pointer(thresholds->primary, new);

	/* To be sure that nobody uses thresholds */
	synchronize_rcu();

	/* If all events are unregistered, free the spare array */
	if (!new) {
		kfree(thresholds->spare);
		thresholds->spare = NULL;
	}
unlock:
	mutex_unlock(&memcg->thresholds_lock);
}

static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd)
{
	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
}

static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd)
{
	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
}

static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, const char *args)
{
	struct mem_cgroup_eventfd_list *event;

	event = kmalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	spin_lock(&memcg_oom_lock);

	event->eventfd = eventfd;
	list_add(&event->list, &memcg->oom_notify);

	/* already in OOM ? */
	if (memcg->under_oom)
		eventfd_signal(eventfd, 1);
	spin_unlock(&memcg_oom_lock);

	return 0;
}

static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd)
{
	struct mem_cgroup_eventfd_list *ev, *tmp;

	spin_lock(&memcg_oom_lock);

	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
		if (ev->eventfd == eventfd) {
			list_del(&ev->list);
			kfree(ev);
		}
	}

	spin_unlock(&memcg_oom_lock);
}

3570
static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
3571
{
3572
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
3573

3574
	seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
3575
	seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
3576 3577 3578
	return 0;
}

3579
static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
3580 3581
	struct cftype *cft, u64 val)
{
3582
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3583 3584

	/* cannot set to root cgroup and only 0 and 1 are allowed */
3585
	if (!css->parent || !((val == 0) || (val == 1)))
3586 3587
		return -EINVAL;

3588
	memcg->oom_kill_disable = val;
3589
	if (!val)
3590
		memcg_oom_recover(memcg);
3591

3592 3593 3594
	return 0;
}

3595 3596 3597 3598 3599 3600 3601
#ifdef CONFIG_CGROUP_WRITEBACK

struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg)
{
	return &memcg->cgwb_list;
}

T
Tejun Heo 已提交
3602 3603 3604 3605 3606 3607 3608 3609 3610 3611
static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
{
	return wb_domain_init(&memcg->cgwb_domain, gfp);
}

static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
{
	wb_domain_exit(&memcg->cgwb_domain);
}

static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
{
	wb_domain_size_changed(&memcg->cgwb_domain);
}

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);

	if (!memcg->css.parent)
		return NULL;

	return &memcg->cgwb_domain;
}

/**
 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
 * @wb: bdi_writeback in question
 * @pfilepages: out parameter for number of file pages
 * @pheadroom: out parameter for number of allocatable pages according to memcg
 * @pdirty: out parameter for number of dirty pages
 * @pwriteback: out parameter for number of pages under writeback
 *
 * Determine the numbers of file, headroom, dirty, and writeback pages in
 * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
 * is a bit more involved.
 *
 * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
 * headroom is calculated as the lowest headroom of itself and the
 * ancestors.  Note that this doesn't consider the actual amount of
 * available memory in the system.  The caller should further cap
 * *@pheadroom accordingly.
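 *
 * As an illustration with made-up numbers: a memcg with max = 200MB,
 * high = 100MB and 70MB in use has min(200, 100) - 70 = 30MB of
 * headroom of its own; the value reported here is the smallest such
 * headroom along the ancestor chain.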
 */
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
	struct mem_cgroup *parent;

	*pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY);

	/* this should eventually include NR_UNSTABLE_NFS */
	*pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
	*pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
						     (1 << LRU_ACTIVE_FILE));
	*pheadroom = PAGE_COUNTER_MAX;

	while ((parent = parent_mem_cgroup(memcg))) {
		unsigned long ceiling = min(memcg->memory.limit, memcg->high);
		unsigned long used = page_counter_read(&memcg->memory);

		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
		memcg = parent;
	}
}

#else	/* CONFIG_CGROUP_WRITEBACK */

static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
{
	return 0;
}

static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
{
}

static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

/*
 * DO NOT USE IN NEW FILES.
 *
 * "cgroup.event_control" implementation.
 *
 * This is way over-engineered.  It tries to support fully configurable
 * events for each user.  Such level of flexibility is completely
 * unnecessary especially in the light of the planned unified hierarchy.
 *
 * Please deprecate this and replace with something simpler if at all
 * possible.
 */

/*
 * Unregister event and free resources.
 *
 * Gets called from workqueue.
 */
static void memcg_event_remove(struct work_struct *work)
{
	struct mem_cgroup_event *event =
		container_of(work, struct mem_cgroup_event, remove);
	struct mem_cgroup *memcg = event->memcg;

	remove_wait_queue(event->wqh, &event->wait);

	event->unregister_event(memcg, event->eventfd);

	/* Notify userspace the event is going away. */
	eventfd_signal(event->eventfd, 1);

	eventfd_ctx_put(event->eventfd);
	kfree(event);
	css_put(&memcg->css);
}

/*
 * Gets called on POLLHUP on eventfd when user closes it.
 *
 * Called with wqh->lock held and interrupts disabled.
 */
static int memcg_event_wake(wait_queue_t *wait, unsigned mode,
			    int sync, void *key)
{
	struct mem_cgroup_event *event =
		container_of(wait, struct mem_cgroup_event, wait);
	struct mem_cgroup *memcg = event->memcg;
	unsigned long flags = (unsigned long)key;

	if (flags & POLLHUP) {
		/*
		 * If the event has been detached at cgroup removal, we
		 * can simply return knowing the other side will cleanup
		 * for us.
		 *
		 * We can't race against event freeing since the other
		 * side will require wqh->lock via remove_wait_queue(),
		 * which we hold.
		 */
		spin_lock(&memcg->event_list_lock);
		if (!list_empty(&event->list)) {
			list_del_init(&event->list);
			/*
			 * We are in atomic context, but memcg_event_remove()
			 * may sleep, so we have to call it in workqueue.
			 */
			schedule_work(&event->remove);
		}
		spin_unlock(&memcg->event_list_lock);
	}

	return 0;
}

static void memcg_event_ptable_queue_proc(struct file *file,
		wait_queue_head_t *wqh, poll_table *pt)
{
	struct mem_cgroup_event *event =
		container_of(pt, struct mem_cgroup_event, pt);

	event->wqh = wqh;
	add_wait_queue(wqh, &event->wait);
}

/*
 * DO NOT USE IN NEW FILES.
 *
 * Parse input and register new cgroup event handler.
 *
 * Input must be in format '<event_fd> <control_fd> <args>'.
 * Interpretation of args is defined by control file implementation.
 */
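/*
 * Illustrative only (the fd numbers below are hypothetical): from
 * userspace,
 *
 *	echo "$event_fd $control_fd 4194304" > cgroup.event_control
 *
 * registers an eventfd notification against the control file that
 * $control_fd refers to, e.g. a 4M threshold on memory.usage_in_bytes.
 */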
static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
					 char *buf, size_t nbytes, loff_t off)
{
	struct cgroup_subsys_state *css = of_css(of);
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct mem_cgroup_event *event;
	struct cgroup_subsys_state *cfile_css;
	unsigned int efd, cfd;
	struct fd efile;
	struct fd cfile;
	const char *name;
	char *endp;
	int ret;

	buf = strstrip(buf);

	efd = simple_strtoul(buf, &endp, 10);
	if (*endp != ' ')
		return -EINVAL;
	buf = endp + 1;

	cfd = simple_strtoul(buf, &endp, 10);
	if ((*endp != ' ') && (*endp != '\0'))
		return -EINVAL;
	buf = endp + 1;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	event->memcg = memcg;
	INIT_LIST_HEAD(&event->list);
	init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
	init_waitqueue_func_entry(&event->wait, memcg_event_wake);
	INIT_WORK(&event->remove, memcg_event_remove);

	efile = fdget(efd);
	if (!efile.file) {
		ret = -EBADF;
		goto out_kfree;
	}

	event->eventfd = eventfd_ctx_fileget(efile.file);
	if (IS_ERR(event->eventfd)) {
		ret = PTR_ERR(event->eventfd);
		goto out_put_efile;
	}

	cfile = fdget(cfd);
	if (!cfile.file) {
		ret = -EBADF;
		goto out_put_eventfd;
	}

	/* the process needs read permission on the control file */
	/* AV: shouldn't we check that it's been opened for read instead? */
	ret = inode_permission(file_inode(cfile.file), MAY_READ);
	if (ret < 0)
		goto out_put_cfile;

	/*
	 * Determine the event callbacks and set them in @event.  This used
	 * to be done via struct cftype but cgroup core no longer knows
	 * about these events.  The following is crude but the whole thing
	 * is for compatibility anyway.
	 *
	 * DO NOT ADD NEW FILES.
	 */
	name = cfile.file->f_path.dentry->d_name.name;

	if (!strcmp(name, "memory.usage_in_bytes")) {
		event->register_event = mem_cgroup_usage_register_event;
		event->unregister_event = mem_cgroup_usage_unregister_event;
	} else if (!strcmp(name, "memory.oom_control")) {
		event->register_event = mem_cgroup_oom_register_event;
		event->unregister_event = mem_cgroup_oom_unregister_event;
	} else if (!strcmp(name, "memory.pressure_level")) {
		event->register_event = vmpressure_register_event;
		event->unregister_event = vmpressure_unregister_event;
	} else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
		event->register_event = memsw_cgroup_usage_register_event;
		event->unregister_event = memsw_cgroup_usage_unregister_event;
	} else {
		ret = -EINVAL;
		goto out_put_cfile;
	}

	/*
	 * Verify @cfile should belong to @css.  Also, remaining events are
	 * automatically removed on cgroup destruction but the removal is
	 * asynchronous, so take an extra ref on @css.
	 */
	cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
					       &memory_cgrp_subsys);
	ret = -EINVAL;
	if (IS_ERR(cfile_css))
		goto out_put_cfile;
	if (cfile_css != css) {
		css_put(cfile_css);
		goto out_put_cfile;
	}

	ret = event->register_event(memcg, event->eventfd, buf);
	if (ret)
		goto out_put_css;

	efile.file->f_op->poll(efile.file, &event->pt);

	spin_lock(&memcg->event_list_lock);
	list_add(&event->list, &memcg->event_list);
	spin_unlock(&memcg->event_list_lock);

	fdput(cfile);
	fdput(efile);

	return nbytes;

out_put_css:
	css_put(css);
out_put_cfile:
	fdput(cfile);
out_put_eventfd:
	eventfd_ctx_put(event->eventfd);
out_put_efile:
	fdput(efile);
out_kfree:
	kfree(event);

	return ret;
}

static struct cftype mem_cgroup_legacy_files[] = {
	{
		.name = "usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "soft_limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "failcnt",
		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "stat",
		.seq_show = memcg_stat_show,
	},
	{
		.name = "force_empty",
		.write = mem_cgroup_force_empty_write,
	},
	{
		.name = "use_hierarchy",
		.write_u64 = mem_cgroup_hierarchy_write,
		.read_u64 = mem_cgroup_hierarchy_read,
	},
	{
		.name = "cgroup.event_control",		/* XXX: for compat */
		.write = memcg_write_event_control,
		.flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
	},
	{
		.name = "swappiness",
		.read_u64 = mem_cgroup_swappiness_read,
		.write_u64 = mem_cgroup_swappiness_write,
	},
	{
		.name = "move_charge_at_immigrate",
		.read_u64 = mem_cgroup_move_charge_read,
		.write_u64 = mem_cgroup_move_charge_write,
	},
	{
		.name = "oom_control",
		.seq_show = mem_cgroup_oom_control_read,
		.write_u64 = mem_cgroup_oom_control_write,
		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
	},
	{
		.name = "pressure_level",
	},
#ifdef CONFIG_NUMA
	{
		.name = "numa_stat",
		.seq_show = memcg_numa_stat_show,
	},
#endif
	{
		.name = "kmem.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "kmem.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "kmem.failcnt",
		.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "kmem.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
#ifdef CONFIG_SLABINFO
	{
		.name = "kmem.slabinfo",
		.seq_start = slab_start,
		.seq_next = slab_next,
		.seq_stop = slab_stop,
		.seq_show = memcg_slab_show,
	},
#endif
	{
		.name = "kmem.tcp.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "kmem.tcp.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "kmem.tcp.failcnt",
		.private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "kmem.tcp.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{ },	/* terminate */
};

/*
 * Private memory cgroup IDR
 *
 * Swap-out records and page cache shadow entries need to store memcg
 * references in constrained space, so we maintain an ID space that is
 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
 * memory-controlled cgroups to 64k.
 *
 * However, there usually are many references to the offline CSS after
 * the cgroup has been destroyed, such as page cache or reclaimable
 * slab objects, that don't need to hang on to the ID. We want to keep
 * those dead CSS from occupying IDs, or we might quickly exhaust the
 * relatively small ID space and prevent the creation of new cgroups
 * even when there are much fewer than 64k cgroups - possibly none.
 *
 * Maintain a private 16-bit ID space for memcg, and allow the ID to
 * be freed and recycled when it's no longer needed, which is usually
 * when the CSS is offlined.
 *
 * The only exception to that are records of swapped out tmpfs/shmem
 * pages that need to be attributed to live ancestors on swapin. But
 * those references are manageable from userspace.
 */

static DEFINE_IDR(mem_cgroup_idr);
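
/*
 * mem_cgroup_id_get()/mem_cgroup_id_put() pin the private ID; as long
 * as the reference count stays elevated, the ID (and, through it, the
 * CSS) remains resolvable via mem_cgroup_from_id().
 */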

static void mem_cgroup_id_get(struct mem_cgroup *memcg)
{
	atomic_inc(&memcg->id.ref);
}

static void mem_cgroup_id_put(struct mem_cgroup *memcg)
{
	if (atomic_dec_and_test(&memcg->id.ref)) {
		idr_remove(&mem_cgroup_idr, memcg->id.id);
		memcg->id.id = 0;

		/* Memcg ID pins CSS */
		css_put(&memcg->css);
	}
}

/**
 * mem_cgroup_from_id - look up a memcg from a memcg id
 * @id: the memcg id to look up
 *
 * Caller must hold rcu_read_lock().
 */
struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return idr_find(&mem_cgroup_idr, id);
}

static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
{
	struct mem_cgroup_per_node *pn;
	int tmp = node;
	/*
	 * This routine is called against possible nodes.
	 * But it's BUG to call kmalloc() against offline node.
	 *
	 * TODO: this routine can waste much memory for nodes which will
	 *       never be onlined. It's better to use memory hotplug callback
	 *       function.
	 */
	if (!node_state(node, N_NORMAL_MEMORY))
		tmp = -1;
	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
	if (!pn)
		return 1;

	lruvec_init(&pn->lruvec);
	pn->usage_in_excess = 0;
	pn->on_tree = false;
	pn->memcg = memcg;

	memcg->nodeinfo[node] = pn;
	return 0;
}

static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
{
	kfree(memcg->nodeinfo[node]);
}

static void mem_cgroup_free(struct mem_cgroup *memcg)
{
	int node;

	memcg_wb_domain_exit(memcg);
	for_each_node(node)
		free_mem_cgroup_per_node_info(memcg, node);
	free_percpu(memcg->stat);
	kfree(memcg);
}

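/*
 * Allocate the bare mem_cgroup structure: per-cpu statistics, per-node
 * info, the writeback domain and a private ID.  The page counters and
 * hierarchy linkage are filled in later by mem_cgroup_css_alloc().
 */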
static struct mem_cgroup *mem_cgroup_alloc(void)
{
	struct mem_cgroup *memcg;
	size_t size;
	int node;

	size = sizeof(struct mem_cgroup);
	size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);

	memcg = kzalloc(size, GFP_KERNEL);
	if (!memcg)
		return NULL;

	memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
				 1, MEM_CGROUP_ID_MAX,
				 GFP_KERNEL);
	if (memcg->id.id < 0)
		goto fail;

	memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
	if (!memcg->stat)
		goto fail;

	for_each_node(node)
		if (alloc_mem_cgroup_per_node_info(memcg, node))
			goto fail;

	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
		goto fail;

	INIT_WORK(&memcg->high_work, high_work_func);
	memcg->last_scanned_node = MAX_NUMNODES;
	INIT_LIST_HEAD(&memcg->oom_notify);
	mutex_init(&memcg->thresholds_lock);
	spin_lock_init(&memcg->move_lock);
	vmpressure_init(&memcg->vmpressure);
	INIT_LIST_HEAD(&memcg->event_list);
	spin_lock_init(&memcg->event_list_lock);
	memcg->socket_pressure = jiffies;
#ifndef CONFIG_SLOB
	memcg->kmemcg_id = -1;
#endif
#ifdef CONFIG_CGROUP_WRITEBACK
	INIT_LIST_HEAD(&memcg->cgwb_list);
#endif
	idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
	return memcg;
fail:
	if (memcg->id.id > 0)
		idr_remove(&mem_cgroup_idr, memcg->id.id);
	mem_cgroup_free(memcg);
	return NULL;
}

static struct cgroup_subsys_state * __ref
mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
	struct mem_cgroup *memcg;
	long error = -ENOMEM;

	memcg = mem_cgroup_alloc();
	if (!memcg)
		return ERR_PTR(error);

	memcg->high = PAGE_COUNTER_MAX;
	memcg->soft_limit = PAGE_COUNTER_MAX;
	if (parent) {
		memcg->swappiness = mem_cgroup_swappiness(parent);
		memcg->oom_kill_disable = parent->oom_kill_disable;
	}
	if (parent && parent->use_hierarchy) {
		memcg->use_hierarchy = true;
		page_counter_init(&memcg->memory, &parent->memory);
		page_counter_init(&memcg->swap, &parent->swap);
		page_counter_init(&memcg->memsw, &parent->memsw);
		page_counter_init(&memcg->kmem, &parent->kmem);
		page_counter_init(&memcg->tcpmem, &parent->tcpmem);
	} else {
		page_counter_init(&memcg->memory, NULL);
		page_counter_init(&memcg->swap, NULL);
		page_counter_init(&memcg->memsw, NULL);
		page_counter_init(&memcg->kmem, NULL);
		page_counter_init(&memcg->tcpmem, NULL);
		/*
		 * Deeper hierarchy with use_hierarchy == false doesn't make
		 * much sense so let cgroup subsystem know about this
		 * unfortunate state in our controller.
		 */
		if (parent != root_mem_cgroup)
			memory_cgrp_subsys.broken_hierarchy = true;
	}

	/* The following stuff does not apply to the root */
	if (!parent) {
		root_mem_cgroup = memcg;
		return &memcg->css;
	}

	error = memcg_online_kmem(memcg);
	if (error)
		goto fail;

	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
		static_branch_inc(&memcg_sockets_enabled_key);

	return &memcg->css;
fail:
	mem_cgroup_free(memcg);
	return ERR_PTR(-ENOMEM);
}

static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
{
	/* Online state pins memcg ID, memcg ID pins CSS */
	mem_cgroup_id_get(mem_cgroup_from_css(css));
	css_get(css);
	return 0;
}

static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct mem_cgroup_event *event, *tmp;

	/*
	 * Unregister events and notify userspace.
	 * Notify userspace about cgroup removing only after rmdir of cgroup
	 * directory to avoid race between userspace and kernelspace.
	 */
	spin_lock(&memcg->event_list_lock);
	list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
		list_del_init(&event->list);
		schedule_work(&event->remove);
	}
	spin_unlock(&memcg->event_list_lock);

	memcg_offline_kmem(memcg);
	wb_memcg_offline(memcg);

	mem_cgroup_id_put(memcg);
}

static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	invalidate_reclaim_iterators(memcg);
}

static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
		static_branch_dec(&memcg_sockets_enabled_key);

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
		static_branch_dec(&memcg_sockets_enabled_key);

	vmpressure_cleanup(&memcg->vmpressure);
	cancel_work_sync(&memcg->high_work);
	mem_cgroup_remove_from_trees(memcg);
	memcg_free_kmem(memcg);
	mem_cgroup_free(memcg);
}

/**
 * mem_cgroup_css_reset - reset the states of a mem_cgroup
 * @css: the target css
 *
 * Reset the states of the mem_cgroup associated with @css.  This is
 * invoked when the userland requests disabling on the default hierarchy
 * but the memcg is pinned through dependency.  The memcg should stop
 * applying policies and should revert to the vanilla state as it may be
 * made visible again.
 *
 * The current implementation only resets the essential configurations.
 * This needs to be expanded to cover all the visible parts.
 */
static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	page_counter_limit(&memcg->memory, PAGE_COUNTER_MAX);
	page_counter_limit(&memcg->swap, PAGE_COUNTER_MAX);
	page_counter_limit(&memcg->memsw, PAGE_COUNTER_MAX);
	page_counter_limit(&memcg->kmem, PAGE_COUNTER_MAX);
	page_counter_limit(&memcg->tcpmem, PAGE_COUNTER_MAX);
	memcg->low = 0;
	memcg->high = PAGE_COUNTER_MAX;
	memcg->soft_limit = PAGE_COUNTER_MAX;
	memcg_wb_domain_size_changed(memcg);
}

#ifdef CONFIG_MMU
/* Handlers for move charge at task migration. */
static int mem_cgroup_do_precharge(unsigned long count)
{
	int ret;

	/* Try a single bulk charge without reclaim first, kswapd may wake */
	ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
	if (!ret) {
		mc.precharge += count;
		return ret;
	}

	/* Try charges one by one with reclaim */
	while (count--) {
		ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1);
		if (ret)
			return ret;
		mc.precharge++;
		cond_resched();
	}
	return 0;
}

union mc_target {
	struct page	*page;
	swp_entry_t	ent;
};

enum mc_target_type {
	MC_TARGET_NONE = 0,
	MC_TARGET_PAGE,
	MC_TARGET_SWAP,
};

static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
						unsigned long addr, pte_t ptent)
{
	struct page *page = vm_normal_page(vma, addr, ptent);

	if (!page || !page_mapped(page))
		return NULL;
	if (PageAnon(page)) {
		if (!(mc.flags & MOVE_ANON))
			return NULL;
	} else {
		if (!(mc.flags & MOVE_FILE))
			return NULL;
	}
	if (!get_page_unless_zero(page))
		return NULL;

	return page;
}

#ifdef CONFIG_SWAP
static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
			pte_t ptent, swp_entry_t *entry)
{
	struct page *page = NULL;
	swp_entry_t ent = pte_to_swp_entry(ptent);

	if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
		return NULL;
	/*
	 * Because lookup_swap_cache() updates some statistics counter,
	 * we call find_get_page() with swapper_space directly.
	 */
	page = find_get_page(swap_address_space(ent), ent.val);
	if (do_memsw_account())
		entry->val = ent.val;

	return page;
}
#else
static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
			pte_t ptent, swp_entry_t *entry)
{
	return NULL;
}
#endif

static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	struct page *page = NULL;
	struct address_space *mapping;
	pgoff_t pgoff;

	if (!vma->vm_file) /* anonymous vma */
		return NULL;
	if (!(mc.flags & MOVE_FILE))
		return NULL;

	mapping = vma->vm_file->f_mapping;
	pgoff = linear_page_index(vma, addr);

	/* page is moved even if it's not RSS of this task(page-faulted). */
#ifdef CONFIG_SWAP
	/* shmem/tmpfs may report page out on swap: account for that too. */
	if (shmem_mapping(mapping)) {
		page = find_get_entry(mapping, pgoff);
		if (radix_tree_exceptional_entry(page)) {
			swp_entry_t swp = radix_to_swp_entry(page);
			if (do_memsw_account())
				*entry = swp;
			page = find_get_page(swap_address_space(swp), swp.val);
		}
	} else
		page = find_get_page(mapping, pgoff);
#else
	page = find_get_page(mapping, pgoff);
#endif
	return page;
}

/**
 * mem_cgroup_move_account - move account of the page
 * @page: the page
 * @compound: charge the page as compound or small page
 * @from: mem_cgroup which the page is moved from.
 * @to:	mem_cgroup which the page is moved to. @from != @to.
 *
 * The caller must make sure the page is not on LRU (isolate_lru_page() is useful.)
 *
 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
 * from old cgroup.
 */
static int mem_cgroup_move_account(struct page *page,
				   bool compound,
				   struct mem_cgroup *from,
				   struct mem_cgroup *to)
{
	unsigned long flags;
	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
	int ret;
	bool anon;

	VM_BUG_ON(from == to);
	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON(compound && !PageTransHuge(page));

	/*
	 * Prevent mem_cgroup_migrate() from looking at
	 * page->mem_cgroup of its source page while we change it.
	 */
	ret = -EBUSY;
	if (!trylock_page(page))
		goto out;

	ret = -EINVAL;
	if (page->mem_cgroup != from)
		goto out_unlock;

	anon = PageAnon(page);

	spin_lock_irqsave(&from->move_lock, flags);

	if (!anon && page_mapped(page)) {
		__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
			       nr_pages);
		__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
			       nr_pages);
	}

	/*
	 * move_lock grabbed above and caller set from->moving_account, so
	 * mem_cgroup_update_page_stat() will serialize updates to PageDirty.
	 * So mapping should be stable for dirty pages.
	 */
	if (!anon && PageDirty(page)) {
		struct address_space *mapping = page_mapping(page);

		if (mapping_cap_account_dirty(mapping)) {
			__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_DIRTY],
				       nr_pages);
			__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_DIRTY],
				       nr_pages);
		}
	}

	if (PageWriteback(page)) {
		__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
			       nr_pages);
		__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
			       nr_pages);
	}

	/*
	 * It is safe to change page->mem_cgroup here because the page
	 * is referenced, charged, and isolated - we can't race with
	 * uncharging, charging, migration, or LRU putback.
	 */

	/* caller should have done css_get */
	page->mem_cgroup = to;
	spin_unlock_irqrestore(&from->move_lock, flags);

	ret = 0;

	local_irq_disable();
	mem_cgroup_charge_statistics(to, page, compound, nr_pages);
	memcg_check_events(to, page);
	mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
	memcg_check_events(from, page);
	local_irq_enable();
out_unlock:
	unlock_page(page);
out:
	return ret;
}

/**
 * get_mctgt_type - get target type of moving charge
 * @vma: the vma the pte to be checked belongs
 * @addr: the address corresponding to the pte to be checked
 * @ptent: the pte to be checked
 * @target: the pointer the target page or swap ent will be stored(can be NULL)
 *
 * Returns
 *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
 *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
 *     move charge. if @target is not NULL, the page is stored in target->page
 *     with extra refcnt got(Callers should handle it).
 *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
 *     target for charge migration. if @target is not NULL, the entry is stored
 *     in target->ent.
 *
 * Called with pte lock held.
 */

static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
		unsigned long addr, pte_t ptent, union mc_target *target)
{
	struct page *page = NULL;
	enum mc_target_type ret = MC_TARGET_NONE;
	swp_entry_t ent = { .val = 0 };

	if (pte_present(ptent))
		page = mc_handle_present_pte(vma, addr, ptent);
	else if (is_swap_pte(ptent))
		page = mc_handle_swap_pte(vma, ptent, &ent);
	else if (pte_none(ptent))
		page = mc_handle_file_pte(vma, addr, ptent, &ent);

	if (!page && !ent.val)
		return ret;
	if (page) {
		/*
		 * Do only loose check w/o serialization.
		 * mem_cgroup_move_account() checks the page is valid or
		 * not under LRU exclusion.
		 */
		if (page->mem_cgroup == mc.from) {
			ret = MC_TARGET_PAGE;
			if (target)
				target->page = page;
		}
		if (!ret || !target)
			put_page(page);
	}
	/* There is a swap entry and a page doesn't exist or isn't charged */
	if (ent.val && !ret &&
	    mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
		ret = MC_TARGET_SWAP;
		if (target)
			target->ent = ent;
	}
	return ret;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * We don't consider swapping or file mapped pages because THP does not
 * support them for now.
 * Caller should make sure that pmd_trans_huge(pmd) is true.
 */
static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
		unsigned long addr, pmd_t pmd, union mc_target *target)
{
	struct page *page = NULL;
	enum mc_target_type ret = MC_TARGET_NONE;

	page = pmd_page(pmd);
	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
	if (!(mc.flags & MOVE_ANON))
		return ret;
	if (page->mem_cgroup == mc.from) {
		ret = MC_TARGET_PAGE;
		if (target) {
			get_page(page);
			target->page = page;
		}
	}
	return ret;
}
#else
static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
		unsigned long addr, pmd_t pmd, union mc_target *target)
{
	return MC_TARGET_NONE;
}
#endif

static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
					unsigned long addr, unsigned long end,
					struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
			mc.precharge += HPAGE_PMD_NR;
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		if (get_mctgt_type(vma, addr, *pte, NULL))
			mc.precharge++;	/* increment precharge temporarily */
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	return 0;
}

static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
{
	unsigned long precharge;

	struct mm_walk mem_cgroup_count_precharge_walk = {
		.pmd_entry = mem_cgroup_count_precharge_pte_range,
		.mm = mm,
	};
	down_read(&mm->mmap_sem);
	walk_page_range(0, ~0UL, &mem_cgroup_count_precharge_walk);
	up_read(&mm->mmap_sem);

	precharge = mc.precharge;
	mc.precharge = 0;

	return precharge;
}

static int mem_cgroup_precharge_mc(struct mm_struct *mm)
{
	unsigned long precharge = mem_cgroup_count_precharge(mm);

	VM_BUG_ON(mc.moving_task);
	mc.moving_task = current;
	return mem_cgroup_do_precharge(precharge);
}

/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
static void __mem_cgroup_clear_mc(void)
{
	struct mem_cgroup *from = mc.from;
	struct mem_cgroup *to = mc.to;

	/* we must uncharge all the leftover precharges from mc.to */
	if (mc.precharge) {
		cancel_charge(mc.to, mc.precharge);
		mc.precharge = 0;
	}
	/*
	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
	 * we must uncharge here.
	 */
	if (mc.moved_charge) {
		cancel_charge(mc.from, mc.moved_charge);
		mc.moved_charge = 0;
	}
	/* we must fixup refcnts and charges */
	if (mc.moved_swap) {
		/* uncharge swap account from the old cgroup */
		if (!mem_cgroup_is_root(mc.from))
			page_counter_uncharge(&mc.from->memsw, mc.moved_swap);

		/*
		 * we charged both to->memory and to->memsw, so we
		 * should uncharge to->memory.
		 */
		if (!mem_cgroup_is_root(mc.to))
			page_counter_uncharge(&mc.to->memory, mc.moved_swap);

		css_put_many(&mc.from->css, mc.moved_swap);

		/* we've already done css_get(mc.to) */
		mc.moved_swap = 0;
	}
	memcg_oom_recover(from);
	memcg_oom_recover(to);
	wake_up_all(&mc.waitq);
}

static void mem_cgroup_clear_mc(void)
{
	struct mm_struct *mm = mc.mm;

	/*
	 * we must clear moving_task before waking up waiters at the end of
	 * task migration.
	 */
	mc.moving_task = NULL;
	__mem_cgroup_clear_mc();
	spin_lock(&mc.lock);
	mc.from = NULL;
	mc.to = NULL;
	mc.mm = NULL;
	spin_unlock(&mc.lock);

	mmput(mm);
}

static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
	struct mem_cgroup *from;
	struct task_struct *leader, *p;
	struct mm_struct *mm;
	unsigned long move_flags;
	int ret = 0;

	/* charge immigration isn't supported on the default hierarchy */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return 0;

	/*
	 * Multi-process migrations only happen on the default hierarchy
	 * where charge immigration is not used.  Perform charge
	 * immigration if @tset contains a leader and whine if there are
	 * multiple.
	 */
	p = NULL;
	cgroup_taskset_for_each_leader(leader, css, tset) {
		WARN_ON_ONCE(p);
		p = leader;
		memcg = mem_cgroup_from_css(css);
	}
	if (!p)
		return 0;

	/*
	 * We are now committed to this value whatever it is. Changes in this
	 * tunable will only affect upcoming migrations, not the current one.
	 * So we need to save it, and keep it going.
	 */
	move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
	if (!move_flags)
		return 0;

	from = mem_cgroup_from_task(p);

	VM_BUG_ON(from == memcg);

	mm = get_task_mm(p);
	if (!mm)
		return 0;
	/* We move charges only when we move an owner of the mm */
	if (mm->owner == p) {
		VM_BUG_ON(mc.from);
		VM_BUG_ON(mc.to);
		VM_BUG_ON(mc.precharge);
		VM_BUG_ON(mc.moved_charge);
		VM_BUG_ON(mc.moved_swap);

		spin_lock(&mc.lock);
		mc.mm = mm;
		mc.from = from;
		mc.to = memcg;
		mc.flags = move_flags;
		spin_unlock(&mc.lock);
		/* We set mc.moving_task later */

		ret = mem_cgroup_precharge_mc(mm);
		if (ret)
			mem_cgroup_clear_mc();
	} else {
		mmput(mm);
	}
	return ret;
}

static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
{
	if (mc.to)
		mem_cgroup_clear_mc();
}

static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mm_walk *walk)
{
	int ret = 0;
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte;
	spinlock_t *ptl;
	enum mc_target_type target_type;
	union mc_target target;
	struct page *page;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		if (mc.precharge < HPAGE_PMD_NR) {
			spin_unlock(ptl);
			return 0;
		}
		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
		if (target_type == MC_TARGET_PAGE) {
			page = target.page;
			if (!isolate_lru_page(page)) {
				if (!mem_cgroup_move_account(page, true,
							     mc.from, mc.to)) {
					mc.precharge -= HPAGE_PMD_NR;
					mc.moved_charge += HPAGE_PMD_NR;
				}
				putback_lru_page(page);
			}
			put_page(page);
		}
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
retry:
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; addr += PAGE_SIZE) {
		pte_t ptent = *(pte++);
		swp_entry_t ent;

		if (!mc.precharge)
			break;

		switch (get_mctgt_type(vma, addr, ptent, &target)) {
		case MC_TARGET_PAGE:
			page = target.page;
			/*
			 * We can have a part of the split pmd here. Moving it
			 * can be done but it would be too convoluted so simply
			 * ignore such a partial THP and keep it in original
			 * memcg. There should be somebody mapping the head.
			 */
			if (PageTransCompound(page))
				goto put;
			if (isolate_lru_page(page))
				goto put;
			if (!mem_cgroup_move_account(page, false,
						mc.from, mc.to)) {
				mc.precharge--;
				/* we uncharge from mc.from later. */
				mc.moved_charge++;
			}
			putback_lru_page(page);
put:			/* get_mctgt_type() gets the page */
			put_page(page);
			break;
		case MC_TARGET_SWAP:
			ent = target.ent;
			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
				mc.precharge--;
				/* we fixup refcnts and charges later. */
				mc.moved_swap++;
			}
			break;
		default:
			break;
		}
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	if (addr != end) {
		/*
		 * We have consumed all precharges we got in can_attach().
		 * We try charge one by one, but don't do any additional
		 * charges to mc.to if we have failed in charge once in attach()
		 * phase.
		 */
		ret = mem_cgroup_do_precharge(1);
		if (!ret)
			goto retry;
	}

	return ret;
}

static void mem_cgroup_move_charge(void)
{
	struct mm_walk mem_cgroup_move_charge_walk = {
		.pmd_entry = mem_cgroup_move_charge_pte_range,
		.mm = mc.mm,
	};

	lru_add_drain_all();
	/*
	 * Signal lock_page_memcg() to take the memcg's move_lock
	 * while we're moving its pages to another memcg. Then wait
	 * for already started RCU-only updates to finish.
	 */
	atomic_inc(&mc.from->moving_account);
	synchronize_rcu();
retry:
	if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
		/*
		 * Someone who is holding the mmap_sem might be waiting in
		 * waitq. So we cancel all extra charges, wake up all waiters,
		 * and retry. Because we cancel precharges, we might not be able
		 * to move enough charges, but moving charge is a best-effort
		 * feature anyway, so it wouldn't be a big problem.
		 */
		__mem_cgroup_clear_mc();
		cond_resched();
		goto retry;
	}
	/*
	 * When we have consumed all precharges and failed in doing
	 * additional charge, the page walk just aborts.
	 */
	walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
	up_read(&mc.mm->mmap_sem);
	atomic_dec(&mc.from->moving_account);
}

static void mem_cgroup_move_task(void)
{
	if (mc.to) {
		mem_cgroup_move_charge();
		mem_cgroup_clear_mc();
	}
}
#else	/* !CONFIG_MMU */
static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
{
	return 0;
}
static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
{
}
static void mem_cgroup_move_task(void)
{
}
#endif

/*
 * Cgroup retains root cgroups across [un]mount cycles making it necessary
 * to verify whether we're attached to the default hierarchy on each mount
 * attempt.
 */
static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
{
	/*
	 * use_hierarchy is forced on the default hierarchy.  cgroup core
	 * guarantees that @root doesn't have any children, so turning it
	 * on for the root memcg is enough.
	 */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		root_mem_cgroup->use_hierarchy = true;
	else
		root_mem_cgroup->use_hierarchy = false;
}

static u64 memory_current_read(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
}

static int memory_low_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
	unsigned long low = READ_ONCE(memcg->low);

	if (low == PAGE_COUNTER_MAX)
		seq_puts(m, "max\n");
	else
		seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE);

	return 0;
}

static ssize_t memory_low_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long low;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &low);
	if (err)
		return err;

	memcg->low = low;

	return nbytes;
}

static int memory_high_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
	unsigned long high = READ_ONCE(memcg->high);

	if (high == PAGE_COUNTER_MAX)
		seq_puts(m, "max\n");
	else
		seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE);

	return 0;
}

static ssize_t memory_high_write(struct kernfs_open_file *of,
				 char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long nr_pages;
	unsigned long high;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &high);
	if (err)
		return err;

	memcg->high = high;

	nr_pages = page_counter_read(&memcg->memory);
	if (nr_pages > high)
		try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
					     GFP_KERNEL, true);

	memcg_wb_domain_size_changed(memcg);
	return nbytes;
}

static int memory_max_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
	unsigned long max = READ_ONCE(memcg->memory.limit);

	if (max == PAGE_COUNTER_MAX)
		seq_puts(m, "max\n");
	else
		seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);

	return 0;
}

static ssize_t memory_max_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES;
	bool drained = false;
	unsigned long max;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &max);
	if (err)
		return err;

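	/*
	 * Apply the new limit, then whittle usage down below it: reclaim
	 * with a bounded number of retries and, as a last resort, invoke
	 * the memcg OOM killer, as a regular charge over the limit would.
	 */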
	xchg(&memcg->memory.limit, max);

	for (;;) {
		unsigned long nr_pages = page_counter_read(&memcg->memory);

		if (nr_pages <= max)
			break;

		if (signal_pending(current)) {
			err = -EINTR;
			break;
		}

		if (!drained) {
			drain_all_stock(memcg);
			drained = true;
			continue;
		}

		if (nr_reclaims) {
			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
							  GFP_KERNEL, true))
				nr_reclaims--;
			continue;
		}

		mem_cgroup_events(memcg, MEMCG_OOM, 1);
		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
			break;
	}

	memcg_wb_domain_size_changed(memcg);
	return nbytes;
}

static int memory_events_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));

	seq_printf(m, "low %lu\n", mem_cgroup_read_events(memcg, MEMCG_LOW));
	seq_printf(m, "high %lu\n", mem_cgroup_read_events(memcg, MEMCG_HIGH));
	seq_printf(m, "max %lu\n", mem_cgroup_read_events(memcg, MEMCG_MAX));
	seq_printf(m, "oom %lu\n", mem_cgroup_read_events(memcg, MEMCG_OOM));

	return 0;
}

static int memory_stat_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
	unsigned long stat[MEMCG_NR_STAT];
	unsigned long events[MEMCG_NR_EVENTS];
	int i;

	/*
	 * Provide statistics on the state of the memory subsystem as
	 * well as cumulative event counters that show past behavior.
	 *
	 * This list is ordered following a combination of these gradients:
	 * 1) generic big picture -> specifics and details
	 * 2) reflecting userspace activity -> reflecting kernel heuristics
	 *
	 * Current memory state:
	 */

	tree_stat(memcg, stat);
	tree_events(memcg, events);

	seq_printf(m, "anon %llu\n",
		   (u64)stat[MEM_CGROUP_STAT_RSS] * PAGE_SIZE);
	seq_printf(m, "file %llu\n",
		   (u64)stat[MEM_CGROUP_STAT_CACHE] * PAGE_SIZE);
	seq_printf(m, "kernel_stack %llu\n",
		   (u64)stat[MEMCG_KERNEL_STACK] * PAGE_SIZE);
	seq_printf(m, "slab %llu\n",
		   (u64)(stat[MEMCG_SLAB_RECLAIMABLE] +
			 stat[MEMCG_SLAB_UNRECLAIMABLE]) * PAGE_SIZE);
	seq_printf(m, "sock %llu\n",
		   (u64)stat[MEMCG_SOCK] * PAGE_SIZE);

	seq_printf(m, "file_mapped %llu\n",
		   (u64)stat[MEM_CGROUP_STAT_FILE_MAPPED] * PAGE_SIZE);
	seq_printf(m, "file_dirty %llu\n",
		   (u64)stat[MEM_CGROUP_STAT_DIRTY] * PAGE_SIZE);
	seq_printf(m, "file_writeback %llu\n",
		   (u64)stat[MEM_CGROUP_STAT_WRITEBACK] * PAGE_SIZE);

	for (i = 0; i < NR_LRU_LISTS; i++) {
		struct mem_cgroup *mi;
		unsigned long val = 0;

		for_each_mem_cgroup_tree(mi, memcg)
			val += mem_cgroup_nr_lru_pages(mi, BIT(i));
		seq_printf(m, "%s %llu\n",
			   mem_cgroup_lru_names[i], (u64)val * PAGE_SIZE);
	}

	seq_printf(m, "slab_reclaimable %llu\n",
		   (u64)stat[MEMCG_SLAB_RECLAIMABLE] * PAGE_SIZE);
	seq_printf(m, "slab_unreclaimable %llu\n",
		   (u64)stat[MEMCG_SLAB_UNRECLAIMABLE] * PAGE_SIZE);

	/* Accumulated memory events */

	seq_printf(m, "pgfault %lu\n",
		   events[MEM_CGROUP_EVENTS_PGFAULT]);
	seq_printf(m, "pgmajfault %lu\n",
		   events[MEM_CGROUP_EVENTS_PGMAJFAULT]);

	return 0;
}

static struct cftype memory_files[] = {
	{
		.name = "current",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = memory_current_read,
	},
	{
		.name = "low",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_low_show,
		.write = memory_low_write,
	},
	{
		.name = "high",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_high_show,
		.write = memory_high_write,
	},
	{
		.name = "max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_max_show,
		.write = memory_max_write,
	},
	{
		.name = "events",
		.flags = CFTYPE_NOT_ON_ROOT,
		.file_offset = offsetof(struct mem_cgroup, events_file),
		.seq_show = memory_events_show,
	},
	{
		.name = "stat",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_stat_show,
	},
	{ }	/* terminate */
};

struct cgroup_subsys memory_cgrp_subsys = {
	.css_alloc = mem_cgroup_css_alloc,
	.css_online = mem_cgroup_css_online,
	.css_offline = mem_cgroup_css_offline,
	.css_released = mem_cgroup_css_released,
	.css_free = mem_cgroup_css_free,
	.css_reset = mem_cgroup_css_reset,
	.can_attach = mem_cgroup_can_attach,
	.cancel_attach = mem_cgroup_cancel_attach,
	.post_attach = mem_cgroup_move_task,
	.bind = mem_cgroup_bind,
	.dfl_cftypes = memory_files,
	.legacy_cftypes = mem_cgroup_legacy_files,
	.early_init = 0,
};

/**
 * mem_cgroup_low - check if memory consumption is below the normal range
 * @root: the highest ancestor to consider
 * @memcg: the memory cgroup to check
 *
 * Returns %true if memory consumption of @memcg, and that of all
 * configurable ancestors up to @root, is below the normal range.
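 *
 * For example (hypothetical configuration): with memory.low = 50M set
 * on each level of a/b/c, a page charged to c is considered protected
 * only while each of those cgroups is using less than 50M; once any of
 * them crosses its low boundary, this returns %false.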
 */
bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return false;

	/*
	 * The toplevel group doesn't have a configurable range, so
	 * it's never low when looked at directly, and it is not
	 * considered an ancestor when assessing the hierarchy.
	 */

	if (memcg == root_mem_cgroup)
		return false;

	if (page_counter_read(&memcg->memory) >= memcg->low)
		return false;

	while (memcg != root) {
		memcg = parent_mem_cgroup(memcg);

		if (memcg == root_mem_cgroup)
			break;

		if (page_counter_read(&memcg->memory) >= memcg->low)
			return false;
	}
	return true;
}

/**
 * mem_cgroup_try_charge - try charging a page
 * @page: page to charge
 * @mm: mm context of the victim
 * @gfp_mask: reclaim mode
 * @memcgp: charged memcg return
 * @compound: charge the page as compound or small page
 *
 * Try to charge @page to the memcg that @mm belongs to, reclaiming
 * pages according to @gfp_mask if necessary.
 *
 * Returns 0 on success, with *@memcgp pointing to the charged memcg.
 * Otherwise, an error code is returned.
 *
 * After page->mapping has been set up, the caller must finalize the
 * charge with mem_cgroup_commit_charge().  Or abort the transaction
 * with mem_cgroup_cancel_charge() in case page instantiation fails.
 */
int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
			  bool compound)
{
	struct mem_cgroup *memcg = NULL;
	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
	int ret = 0;

	if (mem_cgroup_disabled())
		goto out;

	if (PageSwapCache(page)) {
		/*
		 * Every swap fault against a single page tries to charge the
		 * page, bail as early as possible.  shmem_unuse() encounters
		 * already charged pages, too.  The USED bit is protected by
		 * the page lock, which serializes swap cache removal, which
		 * in turn serializes uncharging.
		 */
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		if (page->mem_cgroup)
			goto out;

		if (do_swap_account) {
			swp_entry_t ent = { .val = page_private(page), };
			unsigned short id = lookup_swap_cgroup_id(ent);

			rcu_read_lock();
			memcg = mem_cgroup_from_id(id);
			if (memcg && !css_tryget_online(&memcg->css))
				memcg = NULL;
			rcu_read_unlock();
		}
	}

	if (!memcg)
		memcg = get_mem_cgroup_from_mm(mm);

	ret = try_charge(memcg, gfp_mask, nr_pages);

	css_put(&memcg->css);
out:
	*memcgp = memcg;
	return ret;
}

/**
 * mem_cgroup_commit_charge - commit a page charge
 * @page: page to charge
 * @memcg: memcg to charge the page to
 * @lrucare: page might be on LRU already
 * @compound: charge the page as compound or small page
 *
 * Finalize a charge transaction started by mem_cgroup_try_charge(),
 * after page->mapping has been set up.  This must happen atomically
 * as part of the page instantiation, i.e. under the page table lock
 * for anonymous pages, under the page lock for page and swap cache.
 *
 * In addition, the page must not be on the LRU during the commit, to
 * prevent racing with task migration.  If it might be, use @lrucare.
 *
 * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
 */
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
			      bool lrucare, bool compound)
{
	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;

	VM_BUG_ON_PAGE(!page->mapping, page);
	VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);

	if (mem_cgroup_disabled())
		return;
	/*
	 * Swap faults will attempt to charge the same page multiple
	 * times.  But reuse_swap_page() might have removed the page
	 * from swapcache already, so we can't check PageSwapCache().
	 */
	if (!memcg)
		return;

	commit_charge(page, memcg, lrucare);

	local_irq_disable();
	mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
	memcg_check_events(memcg, page);
	local_irq_enable();

	if (do_memsw_account() && PageSwapCache(page)) {
		swp_entry_t entry = { .val = page_private(page) };
		/*
		 * The swap entry might not get freed for a long time,
		 * let's not wait for it.  The page already received a
		 * memory+swap charge, drop the swap entry duplicate.
		 */
		mem_cgroup_uncharge_swap(entry);
	}
}

/**
 * mem_cgroup_cancel_charge - cancel a page charge
 * @page: page to charge
 * @memcg: memcg to charge the page to
 * @compound: charge the page as compound or small page
 *
 * Cancel a charge transaction started by mem_cgroup_try_charge().
 */
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
		bool compound)
{
	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;

	if (mem_cgroup_disabled())
		return;
	/*
	 * Swap faults will attempt to charge the same page multiple
	 * times.  But reuse_swap_page() might have removed the page
	 * from swapcache already, so we can't check PageSwapCache().
	 */
	if (!memcg)
		return;

	cancel_charge(memcg, nr_pages);
}

static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
			   unsigned long nr_anon, unsigned long nr_file,
			   unsigned long nr_huge, unsigned long nr_kmem,
			   struct page *dummy_page)
{
	unsigned long nr_pages = nr_anon + nr_file + nr_kmem;
	unsigned long flags;

	if (!mem_cgroup_is_root(memcg)) {
		page_counter_uncharge(&memcg->memory, nr_pages);
		if (do_memsw_account())
			page_counter_uncharge(&memcg->memsw, nr_pages);
		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && nr_kmem)
			page_counter_uncharge(&memcg->kmem, nr_kmem);
		memcg_oom_recover(memcg);
	}

	local_irq_save(flags);
	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
	__this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
	memcg_check_events(memcg, dummy_page);
	local_irq_restore(flags);

	if (!mem_cgroup_is_root(memcg))
		css_put_many(&memcg->css, nr_pages);
}

static void uncharge_list(struct list_head *page_list)
{
	struct mem_cgroup *memcg = NULL;
	unsigned long nr_anon = 0;
	unsigned long nr_file = 0;
	unsigned long nr_huge = 0;
	unsigned long nr_kmem = 0;
	unsigned long pgpgout = 0;
	struct list_head *next;
	struct page *page;

	/*
	 * Note that the list can be a single page->lru; hence the
	 * do-while loop instead of a simple list_for_each_entry().
	 */
	next = page_list->next;
	do {
		page = list_entry(next, struct page, lru);
		next = page->lru.next;

		VM_BUG_ON_PAGE(PageLRU(page), page);
		VM_BUG_ON_PAGE(page_count(page), page);

		if (!page->mem_cgroup)
			continue;

		/*
		 * Nobody should be changing or seriously looking at
		 * page->mem_cgroup at this point, we have fully
		 * exclusive access to the page.
		 */

		if (memcg != page->mem_cgroup) {
			if (memcg) {
				uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
					       nr_huge, nr_kmem, page);
				pgpgout = nr_anon = nr_file =
					nr_huge = nr_kmem = 0;
			}
			memcg = page->mem_cgroup;
		}

		if (!PageKmemcg(page)) {
			unsigned int nr_pages = 1;

			if (PageTransHuge(page)) {
				nr_pages <<= compound_order(page);
				nr_huge += nr_pages;
			}
			if (PageAnon(page))
				nr_anon += nr_pages;
			else
				nr_file += nr_pages;
			pgpgout++;
		} else
			nr_kmem += 1 << compound_order(page);

		page->mem_cgroup = NULL;
	} while (next != page_list);

	if (memcg)
		uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
			       nr_huge, nr_kmem, page);
}

/**
 * mem_cgroup_uncharge - uncharge a page
 * @page: page to uncharge
 *
 * Uncharge a page previously charged with mem_cgroup_try_charge() and
 * mem_cgroup_commit_charge().
 */
void mem_cgroup_uncharge(struct page *page)
{
	if (mem_cgroup_disabled())
		return;

	/* Don't touch page->lru of any random page, pre-check: */
	if (!page->mem_cgroup)
		return;

	INIT_LIST_HEAD(&page->lru);
	uncharge_list(&page->lru);
}

/**
 * mem_cgroup_uncharge_list - uncharge a list of pages
 * @page_list: list of pages to uncharge
 *
 * Uncharge a list of pages previously charged with
 * mem_cgroup_try_charge() and mem_cgroup_commit_charge().
 */
void mem_cgroup_uncharge_list(struct list_head *page_list)
{
	if (mem_cgroup_disabled())
		return;

	if (!list_empty(page_list))
		uncharge_list(page_list);
}
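
/*
 * Illustrative sketch (not part of the kernel): batching the uncharge
 * of pages collected on a private list linked through page->lru, the
 * way a release path might do it.  free_hot_cold_page_list() is named
 * only as a plausible companion step; the point is that
 * mem_cgroup_uncharge_list() coalesces the page_counter and statistics
 * updates per memcg instead of paying them once per page.
 *
 *	LIST_HEAD(pages_to_free);
 *
 *	// pages are list_add()ed here as their last reference goes away
 *
 *	mem_cgroup_uncharge_list(&pages_to_free);
 *	free_hot_cold_page_list(&pages_to_free, true);
 */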

/**
 * mem_cgroup_migrate - charge a page's replacement
 * @oldpage: currently circulating page
 * @newpage: replacement page
 *
 * Charge @newpage as a replacement page for @oldpage. @oldpage will
 * be uncharged upon free.
 *
 * Both pages must be locked, @newpage->mapping must be set up.
 */
void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
{
	struct mem_cgroup *memcg;
	unsigned int nr_pages;
	bool compound;
	unsigned long flags;

	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
	VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
	VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
		       newpage);

	if (mem_cgroup_disabled())
		return;

	/* Page cache replacement: new page already charged? */
	if (newpage->mem_cgroup)
		return;

	/* Swapcache readahead pages can get replaced before being charged */
	memcg = oldpage->mem_cgroup;
	if (!memcg)
		return;

	/* Force-charge the new page. The old one will be freed soon */
	compound = PageTransHuge(newpage);
	nr_pages = compound ? hpage_nr_pages(newpage) : 1;

	page_counter_charge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_charge(&memcg->memsw, nr_pages);
	css_get_many(&memcg->css, nr_pages);

	commit_charge(newpage, memcg, false);

	local_irq_save(flags);
	mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
	memcg_check_events(memcg, newpage);
	local_irq_restore(flags);
}
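
/*
 * Illustrative sketch (not part of the kernel): a hypothetical page
 * cache replacement path.  Both pages are locked and @newpage already
 * has its ->mapping set when the charge is transferred; the
 * replacement helper below is invented for the example.
 *
 *	lock_page(oldpage);
 *	lock_page(newpage);
 *	hypothetical_replace_in_page_cache(oldpage, newpage);
 *	mem_cgroup_migrate(oldpage, newpage);
 *	unlock_page(newpage);
 *	unlock_page(oldpage);
 *
 * @oldpage keeps its own charge until it is freed and goes through the
 * normal mem_cgroup_uncharge() path.
 */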

DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
EXPORT_SYMBOL(memcg_sockets_enabled_key);

void sock_update_memcg(struct sock *sk)
{
	struct mem_cgroup *memcg;

	/* Socket cloning can throw us here with sk_cgrp already
	 * filled. It won't, however, necessarily happen from
	 * process context. So the test for root memcg given
	 * the current task's memcg won't help us in this case.
	 *
	 * Respecting the original socket's memcg is a better
	 * decision in this case.
	 */
	if (sk->sk_memcg) {
		BUG_ON(mem_cgroup_is_root(sk->sk_memcg));
		css_get(&sk->sk_memcg->css);
		return;
	}

	rcu_read_lock();
	memcg = mem_cgroup_from_task(current);
	if (memcg == root_mem_cgroup)
		goto out;
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
		goto out;
	if (css_tryget_online(&memcg->css))
		sk->sk_memcg = memcg;
out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(sock_update_memcg);

void sock_release_memcg(struct sock *sk)
{
	WARN_ON(!sk->sk_memcg);
	css_put(&sk->sk_memcg->css);
}

/**
 * mem_cgroup_charge_skmem - charge socket memory
 * @memcg: memcg to charge
 * @nr_pages: number of pages to charge
 *
 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
 * @memcg's configured limit, %false if the charge had to be forced.
 */
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	gfp_t gfp_mask = GFP_KERNEL;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
		struct page_counter *fail;

		if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
			memcg->tcpmem_pressure = 0;
			return true;
		}
		page_counter_charge(&memcg->tcpmem, nr_pages);
		memcg->tcpmem_pressure = 1;
		return false;
	}

	/* Don't block in the packet receive path */
	if (in_softirq())
		gfp_mask = GFP_NOWAIT;

	this_cpu_add(memcg->stat->count[MEMCG_SOCK], nr_pages);

	if (try_charge(memcg, gfp_mask, nr_pages) == 0)
		return true;

	try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
	return false;
}

/**
 * mem_cgroup_uncharge_skmem - uncharge socket memory
 * @memcg: memcg to uncharge
 * @nr_pages: number of pages to uncharge
 */
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
		page_counter_uncharge(&memcg->tcpmem, nr_pages);
		return;
	}

	this_cpu_sub(memcg->stat->count[MEMCG_SOCK], nr_pages);

	page_counter_uncharge(&memcg->memory, nr_pages);
	css_put_many(&memcg->css, nr_pages);
}
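
/*
 * Illustrative sketch (not part of the kernel): how a networking path
 * might account buffer memory against a socket's memcg.  The wrapper
 * is invented for the example; the real callers live in the socket
 * memory accounting code.
 *
 *	static bool hypothetical_account_rx(struct sock *sk,
 *					    unsigned int nr_pages)
 *	{
 *		if (!sk->sk_memcg)
 *			return true;
 *		// false means the charge had to be forced past the limit,
 *		// which the caller can treat as a back-off signal
 *		return mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages);
 *	}
 *
 * The matching release calls mem_cgroup_uncharge_skmem() with the same
 * page count once the buffers are freed.
 */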

static int __init cgroup_memory(char *s)
{
	char *token;

	while ((token = strsep(&s, ",")) != NULL) {
		if (!*token)
			continue;
		if (!strcmp(token, "nosocket"))
			cgroup_memory_nosocket = true;
		if (!strcmp(token, "nokmem"))
			cgroup_memory_nokmem = true;
	}
	return 0;
}
__setup("cgroup.memory=", cgroup_memory);

/*
 * subsys_initcall() for memory controller.
 *
 * Some parts like hotcpu_notifier() have to be initialized from this context
 * because of lock dependencies (cgroup_lock -> cpu hotplug) but basically
 * everything that doesn't depend on a specific mem_cgroup structure should
 * be initialized from here.
 */
static int __init mem_cgroup_init(void)
{
	int cpu, node;

	hotcpu_notifier(memcg_cpu_hotplug_callback, 0);

	for_each_possible_cpu(cpu)
		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
			  drain_local_stock);

	for_each_node(node) {
		struct mem_cgroup_tree_per_node *rtpn;

		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
				    node_online(node) ? node : NUMA_NO_NODE);

		rtpn->rb_root = RB_ROOT;
		spin_lock_init(&rtpn->lock);
		soft_limit_tree.rb_tree_per_node[node] = rtpn;
	}

	return 0;
}
subsys_initcall(mem_cgroup_init);

#ifdef CONFIG_MEMCG_SWAP
/**
 * mem_cgroup_swapout - transfer a memsw charge to swap
 * @page: page whose memsw charge to transfer
 * @entry: swap entry to move the charge to
 *
 * Transfer the memsw charge of @page to @entry.
 */
void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
	struct mem_cgroup *memcg;
	unsigned short oldid;

	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(page_count(page), page);

	if (!do_memsw_account())
		return;

	memcg = page->mem_cgroup;

	/* Readahead page, never charged */
	if (!memcg)
		return;

	mem_cgroup_id_get(memcg);
	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
	VM_BUG_ON_PAGE(oldid, page);
	mem_cgroup_swap_statistics(memcg, true);

	page->mem_cgroup = NULL;

	if (!mem_cgroup_is_root(memcg))
		page_counter_uncharge(&memcg->memory, 1);

	/*
	 * Interrupts should be disabled here because the caller holds
	 * mapping->tree_lock, which is taken with interrupts off. It is
	 * important here to have interrupts disabled because it is the
	 * only synchronisation we have for updating the per-CPU variables.
	 */
	VM_BUG_ON(!irqs_disabled());
	mem_cgroup_charge_statistics(memcg, page, false, -1);
	memcg_check_events(memcg, page);

	if (!mem_cgroup_is_root(memcg))
		css_put(&memcg->css);
}

/**
 * mem_cgroup_try_charge_swap - try charging a swap entry
 * @page: page being added to swap
 * @entry: swap entry to charge
 *
 * Try to charge @entry to the memcg that @page belongs to.
 *
 * Returns 0 on success, -ENOMEM on failure.
 */
int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
{
	struct mem_cgroup *memcg;
	struct page_counter *counter;
	unsigned short oldid;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account)
		return 0;

	memcg = page->mem_cgroup;

	/* Readahead page, never charged */
	if (!memcg)
		return 0;

	if (!mem_cgroup_is_root(memcg) &&
	    !page_counter_try_charge(&memcg->swap, 1, &counter))
		return -ENOMEM;

	mem_cgroup_id_get(memcg);
	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
	VM_BUG_ON_PAGE(oldid, page);
	mem_cgroup_swap_statistics(memcg, true);

	return 0;
}
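
/*
 * Illustrative sketch (not part of the kernel): a hypothetical
 * add-to-swap path charges the swap entry before committing the page
 * to swap cache, and backs off if the memcg's swap limit is hit.  The
 * slot helpers are invented for the example.
 *
 *	swp_entry_t entry = hypothetical_get_swap_slot();
 *
 *	if (mem_cgroup_try_charge_swap(page, entry)) {
 *		hypothetical_put_swap_slot(entry);
 *		return 0;	// no swap space from this memcg's view
 *	}
 *	// proceed to add @page to the swap cache for @entry
 */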

/**
 * mem_cgroup_uncharge_swap - uncharge a swap entry
 * @entry: swap entry to uncharge
 *
 * Drop the swap charge associated with @entry.
 */
void mem_cgroup_uncharge_swap(swp_entry_t entry)
{
	struct mem_cgroup *memcg;
	unsigned short id;

	if (!do_swap_account)
		return;

	id = swap_cgroup_record(entry, 0);
	rcu_read_lock();
	memcg = mem_cgroup_from_id(id);
	if (memcg) {
		if (!mem_cgroup_is_root(memcg)) {
			if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
				page_counter_uncharge(&memcg->swap, 1);
			else
				page_counter_uncharge(&memcg->memsw, 1);
		}
		mem_cgroup_swap_statistics(memcg, false);
		mem_cgroup_id_put(memcg);
	}
	rcu_read_unlock();
}

long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	long nr_swap_pages = get_nr_swap_pages();

	if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return nr_swap_pages;
	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
		nr_swap_pages = min_t(long, nr_swap_pages,
				      READ_ONCE(memcg->swap.limit) -
				      page_counter_read(&memcg->swap));
	return nr_swap_pages;
}
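
/*
 * Illustrative example: with 400000 free swap pages system-wide, a
 * memcg whose swap limit is 100000 pages with 30000 already charged,
 * under a parent limited to 50000 pages with 10000 charged,
 * mem_cgroup_get_nr_swap_pages() returns
 * min(400000, 100000 - 30000, 50000 - 10000) = 40000 pages.
 */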

bool mem_cgroup_swap_full(struct page *page)
{
	struct mem_cgroup *memcg;

	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (vm_swap_full())
		return true;
	if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return false;

	memcg = page->mem_cgroup;
	if (!memcg)
		return false;

	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
		if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.limit)
			return true;

	return false;
}
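
/*
 * Illustrative example: a page in a memcg whose swap limit is 1000
 * pages is reported "swap full" once that group (or any ancestor)
 * has 500 or more pages of swap charged, i.e. usage * 2 >= limit,
 * even if the global vm_swap_full() check would not trigger yet.
 */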

/* to remember the boot option */
#ifdef CONFIG_MEMCG_SWAP_ENABLED
static int really_do_swap_account __initdata = 1;
#else
static int really_do_swap_account __initdata;
#endif

static int __init enable_swap_account(char *s)
{
	if (!strcmp(s, "1"))
		really_do_swap_account = 1;
	else if (!strcmp(s, "0"))
		really_do_swap_account = 0;
	return 1;
}
__setup("swapaccount=", enable_swap_account);

static u64 swap_current_read(struct cgroup_subsys_state *css,
			     struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
}

static int swap_max_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
	unsigned long max = READ_ONCE(memcg->swap.limit);

	if (max == PAGE_COUNTER_MAX)
		seq_puts(m, "max\n");
	else
		seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);

	return 0;
}

static ssize_t swap_max_write(struct kernfs_open_file *of,
			      char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long max;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &max);
	if (err)
		return err;

	mutex_lock(&memcg_limit_mutex);
	err = page_counter_limit(&memcg->swap, max);
	mutex_unlock(&memcg_limit_mutex);
	if (err)
		return err;

	return nbytes;
}

static struct cftype swap_files[] = {
	{
		.name = "swap.current",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = swap_current_read,
	},
	{
		.name = "swap.max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = swap_max_show,
		.write = swap_max_write,
	},
	{ }	/* terminate */
};
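
/*
 * Illustrative usage of the v2 interface defined above, assuming a
 * cgroup mounted at /sys/fs/cgroup/foo (the path is an example):
 *
 *	cat /sys/fs/cgroup/foo/memory.swap.current
 *	echo 1G > /sys/fs/cgroup/foo/memory.swap.max
 *	echo max > /sys/fs/cgroup/foo/memory.swap.max
 *
 * swap_max_write() accepts a byte value with the usual K/M/G suffixes
 * (via page_counter_memparse()) or the literal "max" to remove the
 * limit.
 */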

static struct cftype memsw_cgroup_files[] = {
	{
		.name = "memsw.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.failcnt",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{ },	/* terminate */
};

static int __init mem_cgroup_swap_init(void)
{
	if (!mem_cgroup_disabled() && really_do_swap_account) {
		do_swap_account = 1;
		WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys,
					       swap_files));
		WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
						  memsw_cgroup_files));
	}
	return 0;
}
subsys_initcall(mem_cgroup_swap_init);

#endif /* CONFIG_MEMCG_SWAP */