// SPDX-License-Identifier: GPL-2.0-or-later
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 *
 * Per memcg lru locking
 * Copyright (C) 2020 Alibaba, Inc, Alex Shi
 */

#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/vm_event_item.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include <linux/tracehook.h>
#include <linux/psi.h>
#include <linux/seq_buf.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"

#include <linux/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;

/* Active memory cgroup to use from an interrupt context */
DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);

/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket;

/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem;

/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
bool cgroup_memory_noswap __read_mostly;
#else
#define cgroup_memory_noswap		1
#endif

#ifdef CONFIG_CGROUP_WRITEBACK
static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
#endif

/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_noswap;
}

#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024
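
/*
 * These are per-cpu page-event intervals: mem_cgroup_event_ratelimit()
 * re-arms a target by adding one of these values to the current event
 * count, so threshold notifiers are re-checked roughly every 128
 * charge/uncharge events and the soft limit tree is updated roughly
 * every 1024 events (see memcg_check_events()).
 */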

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_node {
	struct rb_root rb_root;
	struct rb_node *rb_rightmost;
	spinlock_t lock;
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace want to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * register_event() callback will be used to add new userspace
	 * waiter for changes related to this event.  Use eventfd_signal()
	 * on eventfd to send notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace closes
	 * the eventfd or on cgroup removal.  This callback must be set,
	 * if you want to provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below needed to unregister event when
	 * userspace closes eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_entry_t wait;
	struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/* Stuff for move charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON	0x1U
#define MOVE_FILE	0x2U
#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mm_struct  *mm;
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
	_TCP,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
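
/*
 * Worked example (illustrative only): MEMFILE_PRIVATE() packs the
 * resource type into the upper 16 bits and the attribute into the lower
 * 16 bits, and the other two macros unpack it again:
 *
 *	val = MEMFILE_PRIVATE(_MEMSWAP, 3);	// (1 << 16) | 3
 *	MEMFILE_TYPE(val);			// == _MEMSWAP
 *	MEMFILE_ATTR(val);			// == 3
 *
 * The attribute value 3 is an arbitrary number chosen for the example.
 */
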
/* Used for OOM notifier */
#define OOM_CONTROL		(0)

/*
 * Iteration constructs for visiting all cgroups (under a tree).  If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))
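
/*
 * Minimal usage sketch (illustrative, not part of the original code):
 * breaking out of either loop early must be paired with
 * mem_cgroup_iter_break() so the reference held on the current position
 * is dropped; "wanted" below is a hypothetical predicate:
 *
 *	struct mem_cgroup *iter;
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (wanted(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */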

static inline bool should_force_charge(void)
{
	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
		(current->flags & PF_EXITING);
}

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

#ifdef CONFIG_MEMCG_KMEM
extern spinlock_t css_set_lock;

static void obj_cgroup_release(struct percpu_ref *ref)
{
	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
	struct mem_cgroup *memcg;
	unsigned int nr_bytes;
	unsigned int nr_pages;
	unsigned long flags;

	/*
	 * At this point all allocated objects are freed, and
	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
	 *
	 * The following sequence can lead to it:
	 * 1) CPU0: objcg == stock->cached_objcg
	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
	 *          PAGE_SIZE bytes are charged
	 * 3) CPU1: a process from another memcg is allocating something,
	 *          the stock is flushed,
	 *          objcg->nr_charged_bytes = PAGE_SIZE - 92
	 * 4) CPU0: we release this object,
	 *          92 bytes are added to stock->nr_bytes
	 * 5) CPU0: the stock is flushed,
	 *          92 bytes are added to objcg->nr_charged_bytes
	 *
	 * As a result, nr_charged_bytes == PAGE_SIZE.
	 * This page will be uncharged in obj_cgroup_release().
	 */
	nr_bytes = atomic_read(&objcg->nr_charged_bytes);
	WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
	nr_pages = nr_bytes >> PAGE_SHIFT;

	spin_lock_irqsave(&css_set_lock, flags);
	memcg = obj_cgroup_memcg(objcg);
	if (nr_pages)
		__memcg_kmem_uncharge(memcg, nr_pages);
	list_del(&objcg->list);
	mem_cgroup_put(memcg);
	spin_unlock_irqrestore(&css_set_lock, flags);

	percpu_ref_exit(ref);
	kfree_rcu(objcg, rcu);
}

static struct obj_cgroup *obj_cgroup_alloc(void)
{
	struct obj_cgroup *objcg;
	int ret;

	objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
	if (!objcg)
		return NULL;

	ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
			      GFP_KERNEL);
	if (ret) {
		kfree(objcg);
		return NULL;
	}
	INIT_LIST_HEAD(&objcg->list);
	return objcg;
}

static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
				  struct mem_cgroup *parent)
{
	struct obj_cgroup *objcg, *iter;

	objcg = rcu_replace_pointer(memcg->objcg, NULL, true);

	spin_lock_irq(&css_set_lock);

	/* Move active objcg to the parent's list */
	xchg(&objcg->memcg, parent);
	css_get(&parent->css);
	list_add(&objcg->list, &parent->objcg_list);

	/* Move already reparented objcgs to the parent's list */
	list_for_each_entry(iter, &memcg->objcg_list, list) {
		css_get(&parent->css);
		xchg(&iter->memcg, parent);
		css_put(&memcg->css);
	}
	list_splice(&memcg->objcg_list, &parent->objcg_list);

	spin_unlock_irq(&css_set_lock);

	percpu_ref_kill(&objcg->refcnt);
}

/*
 * This will be used as a shrinker list's index.
 * The main reason for not using cgroup id for this:
 *  this works better in sparse environments, where we have a lot of memcgs,
 *  but only a few kmem-limited. Or also, if we have, for instance, 200
 *  memcgs, and none but the 200th is kmem-limited, we'd have to have a
 *  200 entry array for that.
 *
 * The current size of the caches array is stored in memcg_nr_cache_ids. It
 * will double each time we have to increase it.
 */
static DEFINE_IDA(memcg_cache_ida);
int memcg_nr_cache_ids;

/* Protects memcg_nr_cache_ids */
static DECLARE_RWSEM(memcg_cache_ids_sem);

void memcg_get_cache_ids(void)
{
	down_read(&memcg_cache_ids_sem);
}

void memcg_put_cache_ids(void)
{
	up_read(&memcg_cache_ids_sem);
}

/*
 * MIN_SIZE is different than 1, because we would like to avoid going through
 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 * cgroups is a reasonable guess. In the future, it could be a parameter or
 * tunable, but that is strictly not necessary.
 *
 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
 * this constant directly from cgroup, but it is understandable that this is
 * better kept as an internal representation in cgroup.c. In any case, the
 * cgrp_id space is not getting any smaller, and we don't have to necessarily
 * increase ours as well if it increases.
 */
#define MEMCG_CACHES_MIN_SIZE 4
#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
 * conditional to this static branch, we'll have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
EXPORT_SYMBOL(memcg_kmem_enabled_key);
#endif

static int memcg_shrinker_map_size;
static DEFINE_MUTEX(memcg_shrinker_map_mutex);

static void memcg_free_shrinker_map_rcu(struct rcu_head *head)
{
	kvfree(container_of(head, struct memcg_shrinker_map, rcu));
}

static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg,
					 int size, int old_size)
{
	struct memcg_shrinker_map *new, *old;
	int nid;

	lockdep_assert_held(&memcg_shrinker_map_mutex);

	for_each_node(nid) {
		old = rcu_dereference_protected(
			mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true);
		/* Not yet online memcg */
		if (!old)
			return 0;

		new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid);
		if (!new)
			return -ENOMEM;

		/* Set all old bits, clear all new bits */
		memset(new->map, (int)0xff, old_size);
		memset((void *)new->map + old_size, 0, size - old_size);

		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new);
		call_rcu(&old->rcu, memcg_free_shrinker_map_rcu);
	}

	return 0;
}

static void memcg_free_shrinker_maps(struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_node *pn;
	struct memcg_shrinker_map *map;
	int nid;

	if (mem_cgroup_is_root(memcg))
		return;

	for_each_node(nid) {
		pn = mem_cgroup_nodeinfo(memcg, nid);
		map = rcu_dereference_protected(pn->shrinker_map, true);
		if (map)
			kvfree(map);
		rcu_assign_pointer(pn->shrinker_map, NULL);
	}
}

static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
{
	struct memcg_shrinker_map *map;
	int nid, size, ret = 0;

	if (mem_cgroup_is_root(memcg))
		return 0;

	mutex_lock(&memcg_shrinker_map_mutex);
	size = memcg_shrinker_map_size;
	for_each_node(nid) {
		map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid);
		if (!map) {
			memcg_free_shrinker_maps(memcg);
			ret = -ENOMEM;
			break;
		}
		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map);
	}
	mutex_unlock(&memcg_shrinker_map_mutex);

	return ret;
}

int memcg_expand_shrinker_maps(int new_id)
{
	int size, old_size, ret = 0;
	struct mem_cgroup *memcg;

	size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
	old_size = memcg_shrinker_map_size;
	if (size <= old_size)
		return 0;

	mutex_lock(&memcg_shrinker_map_mutex);
	if (!root_mem_cgroup)
		goto unlock;

	for_each_mem_cgroup(memcg) {
		if (mem_cgroup_is_root(memcg))
			continue;
		ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
		if (ret) {
			mem_cgroup_iter_break(NULL, memcg);
			goto unlock;
		}
	}
unlock:
	if (!ret)
		memcg_shrinker_map_size = size;
	mutex_unlock(&memcg_shrinker_map_mutex);
	return ret;
}

void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
{
	if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
		struct memcg_shrinker_map *map;

		rcu_read_lock();
		map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
		/* Pairs with smp mb in shrink_slab() */
		smp_mb__before_atomic();
		set_bit(shrinker_id, map->map);
		rcu_read_unlock();
	}
}

/**
 * mem_cgroup_css_from_page - css of the memcg associated with a page
 * @page: page of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @page is returned.  The returned css remains associated with @page
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
{
	struct mem_cgroup *memcg;

	memcg = page_memcg(page);

	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg = root_mem_cgroup;

	return &memcg->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it only should be used by callers that
 * do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
	memcg = page_memcg_check(page);

	while (memcg && !(memcg->css.flags & CSS_ONLINE))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}

static struct mem_cgroup_per_node *
mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);

	return memcg->nodeinfo[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_node(int nid)
{
	return soft_limit_tree.rb_tree_per_node[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);

	return soft_limit_tree.rb_tree_per_node[nid];
}

static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz,
					 unsigned long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_node *mz_node;
	bool rightmost = true;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess) {
			p = &(*p)->rb_left;
			rightmost = false;
		} else {
			p = &(*p)->rb_right;
		}
	}

	if (rightmost)
		mctz->rb_rightmost = &mz->tree_node;

	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz)
{
	if (!mz->on_tree)
		return;

	if (&mz->tree_node == mctz->rb_rightmost)
		mctz->rb_rightmost = rb_prev(&mz->tree_node);

	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
				       struct mem_cgroup_tree_per_node *mctz)
{
	unsigned long flags;

	spin_lock_irqsave(&mctz->lock, flags);
	__mem_cgroup_remove_exceeded(mz, mctz);
	spin_unlock_irqrestore(&mctz->lock, flags);
}

static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
	unsigned long nr_pages = page_counter_read(&memcg->memory);
	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
	unsigned long excess = 0;

	if (nr_pages > soft_limit)
		excess = nr_pages - soft_limit;

	return excess;
}

static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
	unsigned long excess;
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup_tree_per_node *mctz;

	mctz = soft_limit_tree_from_page(page);
	if (!mctz)
		return;
	/*
	 * Necessary to update all ancestors when hierarchy is used.
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_page_nodeinfo(memcg, page);
		excess = soft_limit_excess(memcg);
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
			spin_unlock_irqrestore(&mctz->lock, flags);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	struct mem_cgroup_tree_per_node *mctz;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = mem_cgroup_nodeinfo(memcg, nid);
		mctz = soft_limit_tree_node(nid);
		if (mctz)
			mem_cgroup_remove_exceeded(mz, mctz);
	}
}

static struct mem_cgroup_per_node *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

retry:
	mz = NULL;
	if (!mctz->rb_rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(mctz->rb_rightmost,
		      struct mem_cgroup_per_node, tree_node);
	/*
	 * Remove the node now but someone else can add it back,
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz, mctz);
	if (!soft_limit_excess(mz->memcg) ||
	    !css_tryget(&mz->memcg->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_node *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

	spin_lock_irq(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock_irq(&mctz->lock);
	return mz;
}

/**
 * __mod_memcg_state - update cgroup memory statistics
 * @memcg: the memory cgroup
 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
 * @val: delta to add to the counter, can be negative
 */
void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
{
	long x, threshold = MEMCG_CHARGE_BATCH;

	if (mem_cgroup_disabled())
		return;

	if (memcg_stat_item_in_bytes(idx))
		threshold <<= PAGE_SHIFT;

	x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);
	if (unlikely(abs(x) > threshold)) {
		struct mem_cgroup *mi;

		/*
		 * Batch local counters to keep them in sync with
		 * the hierarchical ones.
		 */
		__this_cpu_add(memcg->vmstats_local->stat[idx], x);
		for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
			atomic_long_add(x, &mi->vmstats[idx]);
		x = 0;
	}
	__this_cpu_write(memcg->vmstats_percpu->stat[idx], x);
}
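
/*
 * Note on the batching above: per-cpu deltas accumulate in
 * vmstats_percpu until their absolute value exceeds MEMCG_CHARGE_BATCH
 * (shifted to bytes for byte-sized stat items); only then are they
 * folded into the atomic counters of this memcg and all of its
 * ancestors, so readers of the hierarchical counters may lag by up to
 * the batch size per CPU.
 */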

static struct mem_cgroup_per_node *
parent_nodeinfo(struct mem_cgroup_per_node *pn, int nid)
{
	struct mem_cgroup *parent;

	parent = parent_mem_cgroup(pn->memcg);
	if (!parent)
		return NULL;
	return mem_cgroup_nodeinfo(parent, nid);
}

void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			      int val)
{
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup *memcg;
	long x, threshold = MEMCG_CHARGE_BATCH;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	memcg = pn->memcg;

	/* Update memcg */
	__mod_memcg_state(memcg, idx, val);

	/* Update lruvec */
	__this_cpu_add(pn->lruvec_stat_local->count[idx], val);

	if (vmstat_item_in_bytes(idx))
		threshold <<= PAGE_SHIFT;

	x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
	if (unlikely(abs(x) > threshold)) {
		pg_data_t *pgdat = lruvec_pgdat(lruvec);
		struct mem_cgroup_per_node *pi;

		for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
			atomic_long_add(x, &pi->lruvec_stat[idx]);
		x = 0;
	}
	__this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
}

/**
 * __mod_lruvec_state - update lruvec memory statistics
 * @lruvec: the lruvec
 * @idx: the stat item
 * @val: delta to add to the counter, can be negative
 *
 * The lruvec is the intersection of the NUMA node and a cgroup. This
 * function updates all three counters that are affected by a
 * change of state at this level: per-node, per-cgroup, per-lruvec.
 */
void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			int val)
{
	/* Update node */
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);

	/* Update memcg and lruvec */
	if (!mem_cgroup_disabled())
		__mod_memcg_lruvec_state(lruvec, idx, val);
}

void __mod_lruvec_page_state(struct page *page, enum node_stat_item idx,
			     int val)
{
	struct page *head = compound_head(page); /* rmap on tail pages */
	struct mem_cgroup *memcg = page_memcg(head);
	pg_data_t *pgdat = page_pgdat(page);
	struct lruvec *lruvec;

	/* Untracked pages have no memcg, no lruvec. Update only the node */
	if (!memcg) {
		__mod_node_page_state(pgdat, idx, val);
		return;
	}

	lruvec = mem_cgroup_lruvec(memcg, pgdat);
	__mod_lruvec_state(lruvec, idx, val);
}
EXPORT_SYMBOL(__mod_lruvec_page_state);

void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
{
	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = mem_cgroup_from_obj(p);

	/*
	 * Untracked pages have no memcg, no lruvec. Update only the
	 * node. If we reparent the slab objects to the root memcg,
	 * when we free the slab object, we need to update the per-memcg
	 * vmstats to keep it correct for the root memcg.
	 */
	if (!memcg) {
		__mod_node_page_state(pgdat, idx, val);
	} else {
		lruvec = mem_cgroup_lruvec(memcg, pgdat);
		__mod_lruvec_state(lruvec, idx, val);
	}
	rcu_read_unlock();
}

/**
 * __count_memcg_events - account VM events in a cgroup
 * @memcg: the memory cgroup
 * @idx: the event item
 * @count: the number of events that occurred
 */
void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count)
{
	unsigned long x;

	if (mem_cgroup_disabled())
		return;

	x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]);
	if (unlikely(x > MEMCG_CHARGE_BATCH)) {
		struct mem_cgroup *mi;

		/*
		 * Batch local counters to keep them in sync with
		 * the hierarchical ones.
		 */
		__this_cpu_add(memcg->vmstats_local->events[idx], x);
		for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
			atomic_long_add(x, &mi->vmevents[idx]);
		x = 0;
	}
	__this_cpu_write(memcg->vmstats_percpu->events[idx], x);
}

static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
{
	return atomic_long_read(&memcg->vmevents[event]);
}

static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
{
	long x = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		x += per_cpu(memcg->vmstats_local->events[event], cpu);
	return x;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 struct page *page,
					 int nr_pages)
{
	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__count_memcg_events(memcg, PGPGIN, 1);
	else {
		__count_memcg_events(memcg, PGPGOUT, 1);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
}

static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
	next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)(next - val) < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->vmstats_percpu->targets[target], next);
		return true;
	}
	return false;
}

/*
 * Check events in order.
 *
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
	}
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);

/**
 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
 * @mm: mm from which memcg should be extracted. It can be NULL.
 *
 * Obtain a reference on mm->memcg and returns it if successful. Otherwise
 * root_mem_cgroup is returned. However if mem_cgroup is disabled, NULL is
 * returned.
 */
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return NULL;

	rcu_read_lock();
	do {
		/*
		 * Page cache insertions can happen without an
		 * actual mm context, e.g. during disk probing
		 * on boot, loopback IO, acct() writes etc.
		 */
		if (unlikely(!mm))
			memcg = root_mem_cgroup;
		else {
			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
			if (unlikely(!memcg))
				memcg = root_mem_cgroup;
		}
	} while (!css_tryget(&memcg->css));
	rcu_read_unlock();
	return memcg;
}
EXPORT_SYMBOL(get_mem_cgroup_from_mm);

/**
 * get_mem_cgroup_from_page: Obtain a reference on given page's memcg.
 * @page: page from which memcg should be extracted.
 *
 * Obtain a reference on page->memcg and returns it if successful. Otherwise
 * root_mem_cgroup is returned.
 */
struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
{
	struct mem_cgroup *memcg = page_memcg(page);

	if (mem_cgroup_disabled())
		return NULL;

	rcu_read_lock();
	/* Page should not get uncharged and freed memcg under us. */
	if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css)))
		memcg = root_mem_cgroup;
	rcu_read_unlock();
	return memcg;
}
EXPORT_SYMBOL(get_mem_cgroup_from_page);

static __always_inline struct mem_cgroup *active_memcg(void)
{
	if (in_interrupt())
		return this_cpu_read(int_active_memcg);
	else
		return current->active_memcg;
}

static __always_inline struct mem_cgroup *get_active_memcg(void)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = active_memcg();
	if (memcg) {
		/* current->active_memcg must hold a ref. */
		if (WARN_ON_ONCE(!css_tryget(&memcg->css)))
			memcg = root_mem_cgroup;
		else
			memcg = current->active_memcg;
	}
	rcu_read_unlock();

	return memcg;
}

static __always_inline bool memcg_kmem_bypass(void)
{
	/* Allow remote memcg charging from any context. */
	if (unlikely(active_memcg()))
		return false;

	/* Memcg to charge can't be determined. */
	if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
		return true;

	return false;
}

/**
 * If active memcg is set, do not fall back to current->mm->memcg.
 */
static __always_inline struct mem_cgroup *get_mem_cgroup_from_current(void)
{
	if (memcg_kmem_bypass())
		return NULL;

	if (unlikely(active_memcg()))
		return get_active_memcg();

	return get_mem_cgroup_from_mm(current->mm);
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a node in @reclaim to divide up the memcgs
 * in the hierarchy among all concurrent reclaimers operating on the
 * same node.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup_reclaim_iter *iter;
	struct cgroup_subsys_state *css = NULL;
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *pos = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	if (prev && !reclaim)
		pos = prev;

	rcu_read_lock();

	if (reclaim) {
		struct mem_cgroup_per_node *mz;

		mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
		iter = &mz->iter;

		if (prev && reclaim->generation != iter->generation)
			goto out_unlock;

		while (1) {
			pos = READ_ONCE(iter->position);
			if (!pos || css_tryget(&pos->css))
				break;
			/*
			 * css reference reached zero, so iter->position will
			 * be cleared by ->css_released. However, we should not
			 * rely on this happening soon, because ->css_released
			 * is called from a work queue, and by busy-waiting we
			 * might block it. So we clear iter->position right
			 * away.
			 */
			(void)cmpxchg(&iter->position, pos, NULL);
		}
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
		}

		/*
		 * Verify the css and acquire a reference.  The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		memcg = mem_cgroup_from_css(css);

		if (css == &root->css)
			break;

		if (css_tryget(css))
			break;

		memcg = NULL;
	}

	if (reclaim) {
		/*
		 * The position could have already been updated by a competing
		 * thread, so check that the value hasn't changed since we read
		 * it to avoid reclaiming from the same cgroup twice.
		 */
		(void)cmpxchg(&iter->position, pos, memcg);

		if (pos)
			css_put(&pos->css);

		if (!memcg)
			iter->generation++;
		else if (!prev)
			reclaim->generation = iter->generation;
	}

out_unlock:
	rcu_read_unlock();
	if (prev && prev != root)
		css_put(&prev->css);

	return memcg;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
					struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup_reclaim_iter *iter;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = mem_cgroup_nodeinfo(from, nid);
		iter = &mz->iter;
		cmpxchg(&iter->position, dead_memcg, NULL);
	}
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup *memcg = dead_memcg;
	struct mem_cgroup *last;

	do {
		__invalidate_reclaim_iterators(memcg, dead_memcg);
		last = memcg;
	} while ((memcg = parent_mem_cgroup(memcg)));

	/*
	 * When cgroup1 non-hierarchy mode is used,
	 * parent_mem_cgroup() does not walk all the way up to the
	 * cgroup root (root_mem_cgroup). So we have to handle
	 * dead_memcg from cgroup root separately.
	 */
	if (last != root_mem_cgroup)
		__invalidate_reclaim_iterators(root_mem_cgroup,
						dead_memcg);
}

/**
 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
 * @memcg: hierarchy root
 * @fn: function to call for each task
 * @arg: argument passed to @fn
 *
 * This function iterates over tasks attached to @memcg or to any of its
 * descendants and calls @fn for each task. If @fn returns a non-zero
 * value, the function breaks the iteration loop and returns the value.
 * Otherwise, it will iterate over all tasks and return 0.
 *
 * This function must not be called for the root memory cgroup.
 */
int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			  int (*fn)(struct task_struct *, void *), void *arg)
{
	struct mem_cgroup *iter;
	int ret = 0;

	BUG_ON(memcg == root_mem_cgroup);

	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
		while (!ret && (task = css_task_iter_next(&it)))
			ret = fn(task, arg);
		css_task_iter_end(&it);
		if (ret) {
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
	}
	return ret;
}

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	memcg = page_memcg(page);

	if (!memcg)
		VM_BUG_ON_PAGE(lruvec_memcg(lruvec) != root_mem_cgroup, page);
	else
		VM_BUG_ON_PAGE(lruvec_memcg(lruvec) != memcg, page);
}
#endif

/**
 * lock_page_lruvec - lock and return lruvec for a given page.
 * @page: the page
 *
 * These functions are safe to use under any of the following conditions:
 * - page locked
 * - PageLRU cleared
 * - lock_page_memcg()
 * - page->_refcount is zero
 */
struct lruvec *lock_page_lruvec(struct page *page)
{
	struct lruvec *lruvec;
	struct pglist_data *pgdat = page_pgdat(page);

	lruvec = mem_cgroup_page_lruvec(page, pgdat);
	spin_lock(&lruvec->lru_lock);

	lruvec_memcg_debug(lruvec, page);

	return lruvec;
}

struct lruvec *lock_page_lruvec_irq(struct page *page)
{
	struct lruvec *lruvec;
	struct pglist_data *pgdat = page_pgdat(page);

	lruvec = mem_cgroup_page_lruvec(page, pgdat);
	spin_lock_irq(&lruvec->lru_lock);

	lruvec_memcg_debug(lruvec, page);

	return lruvec;
}

struct lruvec *lock_page_lruvec_irqsave(struct page *page, unsigned long *flags)
{
	struct lruvec *lruvec;
	struct pglist_data *pgdat = page_pgdat(page);

	lruvec = mem_cgroup_page_lruvec(page, pgdat);
	spin_lock_irqsave(&lruvec->lru_lock, *flags);

	lruvec_memcg_debug(lruvec, page);

	return lruvec;
}
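
/*
 * Usage sketch (illustrative, assuming the matching unlock_page_lruvec*()
 * helpers declared next to these in memcontrol.h):
 *
 *	unsigned long flags;
 *	struct lruvec *lruvec;
 *
 *	lruvec = lock_page_lruvec_irqsave(page, &flags);
 *	// ... manipulate the page on its LRU list ...
 *	unlock_page_lruvec_irqrestore(lruvec, flags);
 *
 * One of the conditions listed above (page locked, PageLRU cleared,
 * lock_page_memcg(), zero refcount) must hold so the page cannot move
 * to another memcg while the lruvec lock is held.
 */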

/**
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @zid: zone id of the accounted pages
 * @nr_pages: positive when adding or negative when removing
 *
 * This function must be called under lru_lock, just before a page is added
 * to or just after a page is removed from an lru list (that ordering being
 * so as to allow it to check that lru_size 0 is consistent with list_empty).
 */
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages)
{
	struct mem_cgroup_per_node *mz;
	unsigned long *lru_size;
	long size;

	if (mem_cgroup_disabled())
		return;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	lru_size = &mz->lru_zone_size[zid][lru];

	if (nr_pages < 0)
		*lru_size += nr_pages;

	size = *lru_size;
	if (WARN_ONCE(size < 0,
		"%s(%p, %d, %d): lru_size %ld\n",
		__func__, lruvec, lru, nr_pages, size)) {
		VM_BUG_ON(1);
		*lru_size = 0;
	}

	if (nr_pages > 0)
		*lru_size += nr_pages;
}

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @memcg can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
	unsigned long margin = 0;
	unsigned long count;
	unsigned long limit;

	count = page_counter_read(&memcg->memory);
	limit = READ_ONCE(memcg->memory.max);
	if (count < limit)
		margin = limit - count;

	if (do_memsw_account()) {
		count = page_counter_read(&memcg->memsw);
		limit = READ_ONCE(memcg->memsw.max);
		if (count < limit)
			margin = min(margin, limit - count);
		else
			margin = 0;
	}

	return margin;
}
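
/*
 * Worked example (illustrative numbers): with memory usage at 300 pages
 * and memory.max at 500, the memory margin is 200 pages; if memsw usage
 * is 800 against memsw.max of 900, the memory+swap margin is only 100,
 * so with memsw accounting active this returns min(200, 100) == 100.
 */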

/*
 * A routine for checking whether "mem" is under move_account() or not.
 *
 * Checking a cgroup is mc.from or mc.to or under hierarchy of
 * moving cgroups. This is for waiting at high-memory pressure
 * caused by "move".
 */
static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	bool ret = false;
	/*
	 * Unlike task_move routines, we access mc.to, mc.from not under
	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;

	ret = mem_cgroup_is_descendant(from, memcg) ||
		mem_cgroup_is_descendant(to, memcg);
unlock:
	spin_unlock(&mc.lock);
	return ret;
}

static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
{
	if (mc.moving_task && current != mc.moving_task) {
		if (mem_cgroup_under_move(memcg)) {
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}

struct memory_stat {
	const char *name;
	unsigned int idx;
};

static const struct memory_stat memory_stats[] = {
	{ "anon",			NR_ANON_MAPPED			},
	{ "file",			NR_FILE_PAGES			},
	{ "kernel_stack",		NR_KERNEL_STACK_KB		},
	{ "pagetables",			NR_PAGETABLE			},
	{ "percpu",			MEMCG_PERCPU_B			},
	{ "sock",			MEMCG_SOCK			},
	{ "shmem",			NR_SHMEM			},
	{ "file_mapped",		NR_FILE_MAPPED			},
	{ "file_dirty",			NR_FILE_DIRTY			},
	{ "file_writeback",		NR_WRITEBACK			},
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	{ "anon_thp",			NR_ANON_THPS			},
	{ "file_thp",			NR_FILE_THPS			},
	{ "shmem_thp",			NR_SHMEM_THPS			},
#endif
	{ "inactive_anon",		NR_INACTIVE_ANON		},
	{ "active_anon",		NR_ACTIVE_ANON			},
	{ "inactive_file",		NR_INACTIVE_FILE		},
	{ "active_file",		NR_ACTIVE_FILE			},
	{ "unevictable",		NR_UNEVICTABLE			},
	{ "slab_reclaimable",		NR_SLAB_RECLAIMABLE_B		},
	{ "slab_unreclaimable",		NR_SLAB_UNRECLAIMABLE_B		},

	/* The memory events */
	{ "workingset_refault_anon",	WORKINGSET_REFAULT_ANON		},
	{ "workingset_refault_file",	WORKINGSET_REFAULT_FILE		},
	{ "workingset_activate_anon",	WORKINGSET_ACTIVATE_ANON	},
	{ "workingset_activate_file",	WORKINGSET_ACTIVATE_FILE	},
	{ "workingset_restore_anon",	WORKINGSET_RESTORE_ANON		},
	{ "workingset_restore_file",	WORKINGSET_RESTORE_FILE		},
	{ "workingset_nodereclaim",	WORKINGSET_NODERECLAIM		},
};

/* Translate stat items to the correct unit for memory.stat output */
static int memcg_page_state_unit(int item)
{
	switch (item) {
	case MEMCG_PERCPU_B:
	case NR_SLAB_RECLAIMABLE_B:
	case NR_SLAB_UNRECLAIMABLE_B:
	case WORKINGSET_REFAULT_ANON:
	case WORKINGSET_REFAULT_FILE:
	case WORKINGSET_ACTIVATE_ANON:
	case WORKINGSET_ACTIVATE_FILE:
	case WORKINGSET_RESTORE_ANON:
	case WORKINGSET_RESTORE_FILE:
	case WORKINGSET_NODERECLAIM:
		return 1;
	case NR_KERNEL_STACK_KB:
		return SZ_1K;
	default:
		return PAGE_SIZE;
	}
}

static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg,
						    int item)
{
	return memcg_page_state(memcg, item) * memcg_page_state_unit(item);
}
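
/*
 * Worked example (illustrative): NR_FILE_PAGES is tracked in pages and
 * is therefore multiplied by PAGE_SIZE for memory.stat output, while
 * NR_SLAB_RECLAIMABLE_B is already byte-sized (unit 1) and
 * NR_KERNEL_STACK_KB is reported in SZ_1K units.
 */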

static char *memory_stat_format(struct mem_cgroup *memcg)
{
	struct seq_buf s;
	int i;

	seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
	if (!s.buffer)
		return NULL;

	/*
	 * Provide statistics on the state of the memory subsystem as
	 * well as cumulative event counters that show past behavior.
	 *
	 * This list is ordered following a combination of these gradients:
	 * 1) generic big picture -> specifics and details
	 * 2) reflecting userspace activity -> reflecting kernel heuristics
	 *
	 * Current memory state:
	 */

	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
		u64 size;

		size = memcg_page_state_output(memcg, memory_stats[i].idx);
		seq_buf_printf(&s, "%s %llu\n", memory_stats[i].name, size);

		if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
			size += memcg_page_state_output(memcg,
							NR_SLAB_RECLAIMABLE_B);
			seq_buf_printf(&s, "slab %llu\n", size);
		}
	}

	/* Accumulated memory events */

	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGFAULT),
		       memcg_events(memcg, PGFAULT));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGMAJFAULT),
		       memcg_events(memcg, PGMAJFAULT));
	seq_buf_printf(&s, "%s %lu\n",  vm_event_name(PGREFILL),
		       memcg_events(memcg, PGREFILL));
	seq_buf_printf(&s, "pgscan %lu\n",
		       memcg_events(memcg, PGSCAN_KSWAPD) +
		       memcg_events(memcg, PGSCAN_DIRECT));
	seq_buf_printf(&s, "pgsteal %lu\n",
		       memcg_events(memcg, PGSTEAL_KSWAPD) +
		       memcg_events(memcg, PGSTEAL_DIRECT));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGACTIVATE),
		       memcg_events(memcg, PGACTIVATE));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGDEACTIVATE),
		       memcg_events(memcg, PGDEACTIVATE));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREE),
		       memcg_events(memcg, PGLAZYFREE));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREED),
		       memcg_events(memcg, PGLAZYFREED));

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_FAULT_ALLOC),
		       memcg_events(memcg, THP_FAULT_ALLOC));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_COLLAPSE_ALLOC),
		       memcg_events(memcg, THP_COLLAPSE_ALLOC));
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

	/* The above should easily fit into one page */
	WARN_ON_ONCE(seq_buf_has_overflowed(&s));

	return s.buffer;
}

#define K(x) ((x) << (PAGE_SHIFT-10))
/**
 * mem_cgroup_print_oom_context: Print OOM information relevant to
 * memory controller.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
	rcu_read_lock();

	if (memcg) {
		pr_cont(",oom_memcg=");
		pr_cont_cgroup_path(memcg->css.cgroup);
	} else
		pr_cont(",global_oom");
	if (p) {
		pr_cont(",task_memcg=");
		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
	}
	rcu_read_unlock();
}

/**
 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
 * memory controller.
 * @memcg: The memory cgroup that went over limit
 */
void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
	char *buf;

	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memory)),
		K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->swap)),
			K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
	else {
		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->memsw)),
			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->kmem)),
			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
	}

	pr_info("Memory cgroup stats for ");
	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont(":");
	buf = memory_stat_format(memcg);
	if (!buf)
		return;
	pr_info("%s", buf);
	kfree(buf);
}

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	unsigned long max = READ_ONCE(memcg->memory.max);

	if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
		if (mem_cgroup_swappiness(memcg))
			max += min(READ_ONCE(memcg->swap.max),
				   (unsigned long)total_swap_pages);
	} else { /* v1 */
		if (mem_cgroup_swappiness(memcg)) {
			/* Calculate swap excess capacity from memsw limit */
			unsigned long swap = READ_ONCE(memcg->memsw.max) - max;

			max += min(swap, (unsigned long)total_swap_pages);
		}
	}
	return max;
}
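
/*
 * Worked example (illustrative numbers): on cgroup v2 with memory.max of
 * 1000 pages, swap.max of 600 and only 400 pages of swap space in total,
 * this returns 1000 + min(600, 400) = 1400. On v1 with memsw.max of
 * 1500, the swap excess is 1500 - 1000 = 500, so it returns
 * 1000 + min(500, 400) = 1400. With swappiness 0 the swap term is not
 * added at all.
 */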

unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return page_counter_read(&memcg->memory);
}

static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = memcg,
		.gfp_mask = gfp_mask,
		.order = order,
	};
	bool ret = true;

	if (mutex_lock_killable(&oom_lock))
		return true;

	if (mem_cgroup_margin(memcg) >= (1 << order))
		goto unlock;

	/*
	 * A few threads which were not waiting at mutex_lock_killable() can
	 * fail to bail out. Therefore, check again after holding oom_lock.
	 */
	ret = should_force_charge() || out_of_memory(&oc);

unlock:
	mutex_unlock(&oom_lock);
	return ret;
}

static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
				   pg_data_t *pgdat,
				   gfp_t gfp_mask,
				   unsigned long *total_scanned)
{
	struct mem_cgroup *victim = NULL;
	int total = 0;
	int loop = 0;
	unsigned long excess;
	unsigned long nr_scanned;
	struct mem_cgroup_reclaim_cookie reclaim = {
		.pgdat = pgdat,
	};

	excess = soft_limit_excess(root_memcg);

	while (1) {
		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
		if (!victim) {
			loop++;
			if (loop >= 2) {
				/*
				 * If we have not been able to reclaim
				 * anything, it might be because there are
				 * no reclaimable pages under this hierarchy
				 */
				if (!total)
					break;
				/*
				 * We want to do more targeted reclaim.
				 * excess >> 2 is not so large that we reclaim
				 * too much, nor so small that we keep
				 * coming back to reclaim from this cgroup
				 */
				if (total >= (excess >> 2) ||
					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
					break;
			}
			continue;
		}
		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
					pgdat, &nr_scanned);
		*total_scanned += nr_scanned;
		if (!soft_limit_excess(root_memcg))
			break;
	}
	mem_cgroup_iter_break(root_memcg, victim);
	return total;
}

#ifdef CONFIG_LOCKDEP
static struct lockdep_map memcg_oom_lock_dep_map = {
	.name = "memcg_oom_lock",
};
#endif

static DEFINE_SPINLOCK(memcg_oom_lock);

/*
 * Check if the OOM-Killer is already running under our hierarchy.
 * If someone is running, return false.
 */
static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter, *failed = NULL;

	spin_lock(&memcg_oom_lock);

	for_each_mem_cgroup_tree(iter, memcg) {
		if (iter->oom_lock) {
			/*
			 * this subtree of our hierarchy is already locked
			 * so we cannot take the lock.
			 */
			failed = iter;
			mem_cgroup_iter_break(memcg, iter);
			break;
		} else
			iter->oom_lock = true;
	}

	if (failed) {
		/*
		 * OK, we failed to lock the whole subtree so we have
		 * to clean up what we set up to the failing subtree
		 */
		for_each_mem_cgroup_tree(iter, memcg) {
			if (iter == failed) {
				mem_cgroup_iter_break(memcg, iter);
				break;
			}
			iter->oom_lock = false;
		}
	} else
		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);

	spin_unlock(&memcg_oom_lock);

	return !failed;
}

static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->oom_lock = false;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->under_oom++;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	/*
	 * Be careful about under_oom underflows because a child memcg
	 * could have been added after mem_cgroup_mark_under_oom.
	 */
	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		if (iter->under_oom > 0)
			iter->under_oom--;
	spin_unlock(&memcg_oom_lock);
}

static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);

struct oom_wait_info {
	struct mem_cgroup *memcg;
	wait_queue_entry_t	wait;
};

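/*
 * Wake function for OOM waiters: only wake a waiter if the OOMing memcg
 * and the waiter's memcg belong to the same hierarchy branch (one is a
 * descendant of the other, or they are the same memcg).
 */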
static int memcg_oom_wake_function(wait_queue_entry_t *wait,
	unsigned mode, int sync, void *arg)
{
	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
	struct mem_cgroup *oom_wait_memcg;
	struct oom_wait_info *oom_wait_info;

	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
	oom_wait_memcg = oom_wait_info->memcg;

	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
		return 0;
	return autoremove_wake_function(wait, mode, sync, arg);
}

static void memcg_oom_recover(struct mem_cgroup *memcg)
{
	/*
	 * For the following lockless ->under_oom test, the only required
	 * guarantee is that it must see the state asserted by an OOM when
	 * this function is called as a result of userland actions
	 * triggered by the notification of the OOM.  This is trivially
	 * achieved by invoking mem_cgroup_mark_under_oom() before
	 * triggering notification.
	 */
	if (memcg && memcg->under_oom)
		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
}

enum oom_status {
	OOM_SUCCESS,
	OOM_FAILED,
	OOM_ASYNC,
	OOM_SKIPPED
};

static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
{
	enum oom_status ret;
	bool locked;

	if (order > PAGE_ALLOC_COSTLY_ORDER)
		return OOM_SKIPPED;

	memcg_memory_event(memcg, MEMCG_OOM);

	/*
	 * We are in the middle of the charge context here, so we
	 * don't want to block when potentially sitting on a callstack
	 * that holds all kinds of filesystem and mm locks.
	 *
	 * cgroup1 allows disabling the OOM killer and waiting for outside
	 * handling until the charge can succeed; remember the context and put
	 * the task to sleep at the end of the page fault when all locks are
	 * released.
	 *
	 * On the other hand, in-kernel OOM killer allows for an async victim
	 * memory reclaim (oom_reaper) and that means that we are not solely
	 * relying on the oom victim to make a forward progress and we can
	 * invoke the oom killer here.
	 *
	 * Please note that mem_cgroup_out_of_memory might fail to find a
	 * victim and then we have to bail out from the charge path.
	 */
	if (memcg->oom_kill_disable) {
		if (!current->in_user_fault)
			return OOM_SKIPPED;
		css_get(&memcg->css);
		current->memcg_in_oom = memcg;
		current->memcg_oom_gfp_mask = mask;
		current->memcg_oom_order = order;

		return OOM_ASYNC;
	}

	mem_cgroup_mark_under_oom(memcg);

	locked = mem_cgroup_oom_trylock(memcg);

	if (locked)
		mem_cgroup_oom_notify(memcg);

	mem_cgroup_unmark_under_oom(memcg);
	if (mem_cgroup_out_of_memory(memcg, mask, order))
		ret = OOM_SUCCESS;
	else
		ret = OOM_FAILED;

	if (locked)
		mem_cgroup_oom_unlock(memcg);

	return ret;
}

/**
 * mem_cgroup_oom_synchronize - complete memcg OOM handling
 * @handle: actually kill/wait or just clean up the OOM state
 *
 * This has to be called at the end of a page fault if the memcg OOM
 * handler was enabled.
 *
 * Memcg supports userspace OOM handling where failed allocations must
 * sleep on a waitqueue until the userspace task resolves the
 * situation.  Sleeping directly in the charge context with all kinds
 * of locks held is not a good idea, instead we remember an OOM state
 * in the task and mem_cgroup_oom_synchronize() has to be called at
 * the end of the page fault to complete the OOM handling.
 *
 * Returns %true if an ongoing memcg OOM situation was detected and
 * completed, %false otherwise.
 */
bool mem_cgroup_oom_synchronize(bool handle)
{
	struct mem_cgroup *memcg = current->memcg_in_oom;
	struct oom_wait_info owait;
	bool locked;

	/* OOM is global, do not handle */
	if (!memcg)
		return false;

	if (!handle)
		goto cleanup;

	owait.memcg = memcg;
	owait.wait.flags = 0;
	owait.wait.func = memcg_oom_wake_function;
	owait.wait.private = current;
	INIT_LIST_HEAD(&owait.wait.entry);

	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
	mem_cgroup_mark_under_oom(memcg);

	locked = mem_cgroup_oom_trylock(memcg);

	if (locked)
		mem_cgroup_oom_notify(memcg);

	if (locked && !memcg->oom_kill_disable) {
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
					 current->memcg_oom_order);
	} else {
		schedule();
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
	}

	if (locked) {
		mem_cgroup_oom_unlock(memcg);
		/*
		 * There is no guarantee that an OOM-lock contender
		 * sees the wakeups triggered by the OOM kill
		 * uncharges.  Wake any sleepers explicitly.
		 */
		memcg_oom_recover(memcg);
	}
cleanup:
	current->memcg_in_oom = NULL;
	css_put(&memcg->css);
	return true;
}

/**
 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
 * @victim: task to be killed by the OOM killer
 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
 *
 * Returns a pointer to a memory cgroup, which has to be cleaned up
 * by killing all belonging OOM-killable tasks.
 *
 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
 */
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain)
{
	struct mem_cgroup *oom_group = NULL;
	struct mem_cgroup *memcg;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return NULL;

	if (!oom_domain)
		oom_domain = root_mem_cgroup;

	rcu_read_lock();

	memcg = mem_cgroup_from_task(victim);
	if (memcg == root_mem_cgroup)
		goto out;

	/*
	 * If the victim task has been asynchronously moved to a different
	 * memory cgroup, we might end up killing tasks outside oom_domain.
	 * In this case it's better to ignore memory.group.oom.
	 */
	if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
		goto out;

	/*
	 * Traverse the memory cgroup hierarchy from the victim task's
	 * cgroup up to the OOMing cgroup (or root) to find the
	 * highest-level memory cgroup with oom.group set.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		if (memcg->oom_group)
			oom_group = memcg;

		if (memcg == oom_domain)
			break;
	}

	if (oom_group)
		css_get(&oom_group->css);
out:
	rcu_read_unlock();

	return oom_group;
}

void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
	pr_info("Tasks in ");
	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont(" are going to be killed due to memory.oom.group set\n");
}

/**
 * lock_page_memcg - lock a page and memcg binding
 * @page: the page
 *
 * This function protects unlocked LRU pages from being moved to
 * another cgroup.
 *
 * It ensures lifetime of the returned memcg. Caller is responsible
 * for the lifetime of the page; __unlock_page_memcg() is available
 * when @page might get freed inside the locked section.
 */
struct mem_cgroup *lock_page_memcg(struct page *page)
{
	struct page *head = compound_head(page); /* rmap on tail pages */
	struct mem_cgroup *memcg;
	unsigned long flags;

	/*
	 * The RCU lock is held throughout the transaction.  The fast
	 * path can get away without acquiring the memcg->move_lock
	 * because page moving starts with an RCU grace period.
	 *
	 * The RCU lock also protects the memcg from being freed when
	 * the page state that is going to change is the only thing
	 * preventing the page itself from being freed. E.g. writeback
	 * doesn't hold a page reference and relies on PG_writeback to
	 * keep off truncation, migration and so forth.
	 */
	rcu_read_lock();

	if (mem_cgroup_disabled())
		return NULL;
again:
	memcg = page_memcg(head);
	if (unlikely(!memcg))
		return NULL;

#ifdef CONFIG_PROVE_LOCKING
	local_irq_save(flags);
	might_lock(&memcg->move_lock);
	local_irq_restore(flags);
#endif

	if (atomic_read(&memcg->moving_account) <= 0)
		return memcg;

	spin_lock_irqsave(&memcg->move_lock, flags);
	if (memcg != page_memcg(head)) {
		spin_unlock_irqrestore(&memcg->move_lock, flags);
		goto again;
	}

	/*
	 * When charge migration first begins, we can have locked and
	 * unlocked page stat updates happening concurrently.  Track
	 * the task who has the lock for unlock_page_memcg().
	 */
	memcg->move_lock_task = current;
	memcg->move_lock_flags = flags;

	return memcg;
}
EXPORT_SYMBOL(lock_page_memcg);

/**
 * __unlock_page_memcg - unlock and unpin a memcg
 * @memcg: the memcg
 *
 * Unlock and unpin a memcg returned by lock_page_memcg().
 */
void __unlock_page_memcg(struct mem_cgroup *memcg)
{
	if (memcg && memcg->move_lock_task == current) {
		unsigned long flags = memcg->move_lock_flags;

		memcg->move_lock_task = NULL;
		memcg->move_lock_flags = 0;

		spin_unlock_irqrestore(&memcg->move_lock, flags);
	}

	rcu_read_unlock();
}

/**
 * unlock_page_memcg - unlock a page and memcg binding
 * @page: the page
 */
void unlock_page_memcg(struct page *page)
{
	struct page *head = compound_head(page);

	__unlock_page_memcg(page_memcg(head));
}
EXPORT_SYMBOL(unlock_page_memcg);

struct memcg_stock_pcp {
	struct mem_cgroup *cached; /* this is never the root cgroup */
	unsigned int nr_pages;

#ifdef CONFIG_MEMCG_KMEM
	struct obj_cgroup *cached_objcg;
	unsigned int nr_bytes;
#endif

	struct work_struct work;
	unsigned long flags;
#define FLUSHING_CACHED_CHARGE	0
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
static DEFINE_MUTEX(percpu_charge_mutex);

#ifdef CONFIG_MEMCG_KMEM
static void drain_obj_stock(struct memcg_stock_pcp *stock);
static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
				     struct mem_cgroup *root_memcg);

#else
static inline void drain_obj_stock(struct memcg_stock_pcp *stock)
{
}
static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
				     struct mem_cgroup *root_memcg)
{
	return false;
}
#endif

/**
 * consume_stock: Try to consume stocked charge on this cpu.
 * @memcg: memcg to consume from.
 * @nr_pages: how many pages to charge.
 *
 * The charges will only happen if @memcg matches the current cpu's memcg
 * stock, and at least @nr_pages are available in that stock.  Failure to
 * service an allocation will refill the stock.
 *
 * returns true if successful, false otherwise.
 */
static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;
	bool ret = false;

	if (nr_pages > MEMCG_CHARGE_BATCH)
		return ret;

	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
		stock->nr_pages -= nr_pages;
		ret = true;
	}

	local_irq_restore(flags);

	return ret;
}

/*
 * Drain the charges cached in the per-CPU stock and reset the cached information.
 */
static void drain_stock(struct memcg_stock_pcp *stock)
{
	struct mem_cgroup *old = stock->cached;

	if (!old)
		return;

	if (stock->nr_pages) {
		page_counter_uncharge(&old->memory, stock->nr_pages);
		if (do_memsw_account())
			page_counter_uncharge(&old->memsw, stock->nr_pages);
		stock->nr_pages = 0;
	}

	css_put(&old->css);
	stock->cached = NULL;
}

static void drain_local_stock(struct work_struct *dummy)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;

	/*
	 * The only protection from memory hotplug vs. drain_stock races is
	 * that we always operate on local CPU stock here with IRQ disabled
	 */
	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
	drain_obj_stock(stock);
	drain_stock(stock);
	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);

	local_irq_restore(flags);
}

/*
 * Cache @nr_pages worth of charge in the local per-CPU stock, to be
 * consumed later by consume_stock().
 */
static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;

	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
	if (stock->cached != memcg) { /* reset if necessary */
		drain_stock(stock);
		css_get(&memcg->css);
		stock->cached = memcg;
	}
	stock->nr_pages += nr_pages;

	if (stock->nr_pages > MEMCG_CHARGE_BATCH)
		drain_stock(stock);

	local_irq_restore(flags);
}

/*
 * Drains all per-CPU charge caches for given root_memcg resp. subtree
 * of the hierarchy under it.
 */
static void drain_all_stock(struct mem_cgroup *root_memcg)
{
	int cpu, curcpu;

	/* If someone's already draining, avoid running more workers. */
	if (!mutex_trylock(&percpu_charge_mutex))
		return;
	/*
	 * Notify other cpus that system-wide "drain" is running
	 * We do not care about races with the cpu hotplug because cpu down
	 * as well as workers from this path always operate on the local
	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
	 */
	curcpu = get_cpu();
	for_each_online_cpu(cpu) {
		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
		struct mem_cgroup *memcg;
		bool flush = false;

		rcu_read_lock();
		memcg = stock->cached;
		if (memcg && stock->nr_pages &&
		    mem_cgroup_is_descendant(memcg, root_memcg))
			flush = true;
		if (obj_stock_flush_required(stock, root_memcg))
			flush = true;
		rcu_read_unlock();

		if (flush &&
		    !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
			if (cpu == curcpu)
				drain_local_stock(&stock->work);
			else
				schedule_work_on(cpu, &stock->work);
		}
	}
	put_cpu();
	mutex_unlock(&percpu_charge_mutex);
}

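/*
 * CPU hotplug callback: drain the dead CPU's charge stock and fold its
 * per-cpu stat and event deltas into the hierarchical atomic counters.
 */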
static int memcg_hotplug_cpu_dead(unsigned int cpu)
{
	struct memcg_stock_pcp *stock;
	struct mem_cgroup *memcg, *mi;

	stock = &per_cpu(memcg_stock, cpu);
	drain_stock(stock);

	for_each_mem_cgroup(memcg) {
		int i;

		for (i = 0; i < MEMCG_NR_STAT; i++) {
			int nid;
			long x;

			x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0);
			if (x)
				for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
					atomic_long_add(x, &memcg->vmstats[i]);

			if (i >= NR_VM_NODE_STAT_ITEMS)
				continue;

			for_each_node(nid) {
				struct mem_cgroup_per_node *pn;

				pn = mem_cgroup_nodeinfo(memcg, nid);
				x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0);
				if (x)
					do {
						atomic_long_add(x, &pn->lruvec_stat[i]);
					} while ((pn = parent_nodeinfo(pn, nid)));
			}
		}

		for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
			long x;

			x = this_cpu_xchg(memcg->vmstats_percpu->events[i], 0);
			if (x)
				for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
					atomic_long_add(x, &memcg->vmevents[i]);
		}
	}

	return 0;
}

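/*
 * Walk up the hierarchy starting at @memcg and reclaim from every level
 * whose usage exceeds its memory.high. Returns the total number of pages
 * reclaimed.
 */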
static unsigned long reclaim_high(struct mem_cgroup *memcg,
				  unsigned int nr_pages,
				  gfp_t gfp_mask)
{
	unsigned long nr_reclaimed = 0;

	do {
		unsigned long pflags;

		if (page_counter_read(&memcg->memory) <=
		    READ_ONCE(memcg->memory.high))
			continue;

		memcg_memory_event(memcg, MEMCG_HIGH);

		psi_memstall_enter(&pflags);
		nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
							     gfp_mask, true);
		psi_memstall_leave(&pflags);
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));

	return nr_reclaimed;
}

static void high_work_func(struct work_struct *work)
{
	struct mem_cgroup *memcg;

	memcg = container_of(work, struct mem_cgroup, high_work);
	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
}

/*
 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
 * enough to still cause a significant slowdown in most cases, while still
 * allowing diagnostics and tracing to proceed without becoming stuck.
 */
#define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)

/*
 * When calculating the delay, we use these on either side of the exponentiation
 * to maintain precision and scale to a reasonable number of jiffies (see the
 * table below).
 *
 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
 *   overage ratio to a delay.
 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
 *   proposed penalty in order to reduce to a reasonable number of jiffies, and
 *   to produce a reasonable delay curve.
 *
 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
 * reasonable delay curve compared to precision-adjusted overage, not
 * penalising heavily at first, but still making sure that growth beyond the
 * limit penalises misbehaving cgroups by slowing them down exponentially. For
 * example, with a high of 100 megabytes:
 *
 *  +-------+------------------------+
 *  | usage | time to allocate in ms |
 *  +-------+------------------------+
 *  | 100M  |                      0 |
 *  | 101M  |                      6 |
 *  | 102M  |                     25 |
 *  | 103M  |                     57 |
 *  | 104M  |                    102 |
 *  | 105M  |                    159 |
 *  | 106M  |                    230 |
 *  | 107M  |                    313 |
 *  | 108M  |                    409 |
 *  | 109M  |                    518 |
 *  | 110M  |                    639 |
 *  | 111M  |                    774 |
 *  | 112M  |                    921 |
 *  | 113M  |                   1081 |
 *  | 114M  |                   1254 |
 *  | 115M  |                   1439 |
 *  | 116M  |                   1638 |
 *  | 117M  |                   1849 |
 *  | 118M  |                   2000 |
 *  | 119M  |                   2000 |
 *  | 120M  |                   2000 |
 *  +-------+------------------------+
 */
#define MEMCG_DELAY_PRECISION_SHIFT 20
#define MEMCG_DELAY_SCALING_SHIFT 14

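/*
 * Example: with usage = 110M and high = 100M, calculate_overage() below
 * returns (10M << MEMCG_DELAY_PRECISION_SHIFT) / 100M, i.e. roughly 0.1
 * expressed in 20-bit fixed point (~104857).
 */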
static u64 calculate_overage(unsigned long usage, unsigned long high)
{
	u64 overage;

	if (usage <= high)
		return 0;

	/*
	 * Prevent division by 0 in overage calculation by acting as if
	 * it was a threshold of 1 page
	 */
	high = max(high, 1UL);

	overage = usage - high;
	overage <<= MEMCG_DELAY_PRECISION_SHIFT;
	return div64_u64(overage, high);
}

static u64 mem_find_max_overage(struct mem_cgroup *memcg)
{
	u64 overage, max_overage = 0;

	do {
		overage = calculate_overage(page_counter_read(&memcg->memory),
					    READ_ONCE(memcg->memory.high));
		max_overage = max(overage, max_overage);
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));

	return max_overage;
}

static u64 swap_find_max_overage(struct mem_cgroup *memcg)
{
	u64 overage, max_overage = 0;

	do {
		overage = calculate_overage(page_counter_read(&memcg->swap),
					    READ_ONCE(memcg->swap.high));
		if (overage)
			memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
		max_overage = max(overage, max_overage);
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));

	return max_overage;
}

/*
 * Get the number of jiffies that we should penalise a mischievous cgroup which
 * is exceeding its memory.high by checking both it and its ancestors.
 */
static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
					  unsigned int nr_pages,
					  u64 max_overage)
{
	unsigned long penalty_jiffies;

	if (!max_overage)
		return 0;

	/*
	 * We use overage compared to memory.high to calculate the number of
	 * jiffies to sleep (penalty_jiffies). Ideally this value should be
	 * fairly lenient on small overages, and increasingly harsh when the
	 * memcg in question makes it clear that it has no intention of stopping
	 * its crazy behaviour, so we exponentially increase the delay based on
	 * overage amount.
	 */
	penalty_jiffies = max_overage * max_overage * HZ;
	penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
	penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;

	/*
	 * Factor in the task's own contribution to the overage, such that four
	 * N-sized allocations are throttled approximately the same as one
	 * 4N-sized allocation.
	 *
	 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
	 * larger the current charge batch is than that.
	 */
	return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
}

/*
 * Scheduled by try_charge() to be executed from the userland return path
 * and reclaims memory over the high limit.
 */
void mem_cgroup_handle_over_high(void)
{
	unsigned long penalty_jiffies;
	unsigned long pflags;
	unsigned long nr_reclaimed;
	unsigned int nr_pages = current->memcg_nr_pages_over_high;
	int nr_retries = MAX_RECLAIM_RETRIES;
	struct mem_cgroup *memcg;
	bool in_retry = false;

	if (likely(!nr_pages))
		return;

	memcg = get_mem_cgroup_from_mm(current->mm);
	current->memcg_nr_pages_over_high = 0;

retry_reclaim:
	/*
	 * The allocating task should reclaim at least the batch size, but for
	 * subsequent retries we only want to do what's necessary to prevent oom
	 * or breaching resource isolation.
	 *
	 * This is distinct from memory.max or page allocator behaviour because
	 * memory.high is currently batched, whereas memory.max and the page
	 * allocator run every time an allocation is made.
	 */
	nr_reclaimed = reclaim_high(memcg,
				    in_retry ? SWAP_CLUSTER_MAX : nr_pages,
				    GFP_KERNEL);

	/*
	 * memory.high is breached and reclaim is unable to keep up. Throttle
	 * allocators proactively to slow down excessive growth.
	 */
	penalty_jiffies = calculate_high_delay(memcg, nr_pages,
					       mem_find_max_overage(memcg));

	penalty_jiffies += calculate_high_delay(memcg, nr_pages,
						swap_find_max_overage(memcg));

	/*
	 * Clamp the max delay per usermode return so as to still keep the
	 * application moving forwards and also permit diagnostics, albeit
	 * extremely slowly.
	 */
	penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);

	/*
	 * Don't sleep if the amount of jiffies this memcg owes us is so low
	 * that it's not even worth doing, in an attempt to be nice to those who
	 * go only a small amount over their memory.high value and maybe haven't
	 * been aggressively reclaimed enough yet.
	 */
	if (penalty_jiffies <= HZ / 100)
		goto out;

	/*
	 * If reclaim is making forward progress but we're still over
	 * memory.high, we want to encourage that rather than doing allocator
	 * throttling.
	 */
	if (nr_reclaimed || nr_retries--) {
		in_retry = true;
		goto retry_reclaim;
	}

	/*
	 * If we exit early, we're guaranteed to die (since
	 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
	 * need to account for any ill-begotten jiffies to pay them off later.
	 */
	psi_memstall_enter(&pflags);
	schedule_timeout_killable(penalty_jiffies);
	psi_memstall_leave(&pflags);

out:
	css_put(&memcg->css);
}

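/*
 * Charge @nr_pages to @memcg. Tries the per-CPU stock first, then the page
 * counters, falling back to reclaim, draining the stocks and finally the
 * memcg OOM killer. On success any surplus of the charge batch is returned
 * to the local stock.
 */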
static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
		      unsigned int nr_pages)
{
	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
	int nr_retries = MAX_RECLAIM_RETRIES;
	struct mem_cgroup *mem_over_limit;
	struct page_counter *counter;
	enum oom_status oom_status;
	unsigned long nr_reclaimed;
	bool may_swap = true;
	bool drained = false;
	unsigned long pflags;

	if (mem_cgroup_is_root(memcg))
		return 0;
retry:
	if (consume_stock(memcg, nr_pages))
		return 0;

	if (!do_memsw_account() ||
	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
		if (page_counter_try_charge(&memcg->memory, batch, &counter))
			goto done_restock;
		if (do_memsw_account())
			page_counter_uncharge(&memcg->memsw, batch);
		mem_over_limit = mem_cgroup_from_counter(counter, memory);
	} else {
		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
		may_swap = false;
	}

	if (batch > nr_pages) {
		batch = nr_pages;
		goto retry;
	}

	/*
	 * Memcg doesn't have a dedicated reserve for atomic
	 * allocations. But like the global atomic pool, we need to
	 * put the burden of reclaim on regular allocation requests
	 * and let these go through as privileged allocations.
	 */
	if (gfp_mask & __GFP_ATOMIC)
		goto force;

	/*
	 * Unlike in global OOM situations, memcg is not in a physical
	 * memory shortage.  Allow dying and OOM-killed tasks to
	 * bypass the last charges so that they can exit quickly and
	 * free their memory.
	 */
	if (unlikely(should_force_charge()))
		goto force;

	/*
	 * Prevent unbounded recursion when reclaim operations need to
	 * allocate memory. This might exceed the limits temporarily,
	 * but we prefer facilitating memory reclaim and getting back
	 * under the limit over triggering OOM kills in these cases.
	 */
	if (unlikely(current->flags & PF_MEMALLOC))
		goto force;

	if (unlikely(task_in_memcg_oom(current)))
		goto nomem;

	if (!gfpflags_allow_blocking(gfp_mask))
		goto nomem;

	memcg_memory_event(mem_over_limit, MEMCG_MAX);

	psi_memstall_enter(&pflags);
	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
						    gfp_mask, may_swap);
	psi_memstall_leave(&pflags);

	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
		goto retry;

	if (!drained) {
		drain_all_stock(mem_over_limit);
		drained = true;
		goto retry;
	}

	if (gfp_mask & __GFP_NORETRY)
		goto nomem;
	/*
	 * Even though the limit is exceeded at this point, reclaim
	 * may have been able to free some pages.  Retry the charge
	 * before killing the task.
	 *
	 * Only for regular pages, though: huge pages are rather
	 * unlikely to succeed so close to the limit, and we fall back
	 * to regular pages anyway in case of failure.
	 */
	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
		goto retry;
	/*
	 * At task move, charge accounts can be doubly counted. So, it's
	 * better to wait until the end of task_move if something is going on.
	 */
	if (mem_cgroup_wait_acct_move(mem_over_limit))
		goto retry;

	if (nr_retries--)
		goto retry;

	if (gfp_mask & __GFP_RETRY_MAYFAIL)
		goto nomem;

	if (gfp_mask & __GFP_NOFAIL)
		goto force;

	if (fatal_signal_pending(current))
		goto force;

	/*
	 * keep retrying as long as the memcg oom killer is able to make
	 * a forward progress or bypass the charge if the oom killer
	 * couldn't make any progress.
	 */
	oom_status = mem_cgroup_oom(mem_over_limit, gfp_mask,
		       get_order(nr_pages * PAGE_SIZE));
	switch (oom_status) {
	case OOM_SUCCESS:
		nr_retries = MAX_RECLAIM_RETRIES;
		goto retry;
	case OOM_FAILED:
		goto force;
	default:
		goto nomem;
	}
nomem:
	if (!(gfp_mask & __GFP_NOFAIL))
		return -ENOMEM;
force:
	/*
	 * The allocation either can't fail or will lead to more memory
	 * being freed very soon.  Allow memory usage to go over the limit
	 * temporarily by force charging it.
	 */
	page_counter_charge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_charge(&memcg->memsw, nr_pages);

	return 0;

done_restock:
	if (batch > nr_pages)
		refill_stock(memcg, batch - nr_pages);

	/*
	 * If the hierarchy is above the normal consumption range, schedule
	 * reclaim on returning to userland.  We can perform reclaim here
	 * if __GFP_RECLAIM but let's always punt for simplicity and so that
	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
	 * not recorded as it most likely matches current's and won't
	 * change in the meantime.  As high limit is checked again before
	 * reclaim, the cost of mismatch is negligible.
	 */
	do {
		bool mem_high, swap_high;

		mem_high = page_counter_read(&memcg->memory) >
			READ_ONCE(memcg->memory.high);
		swap_high = page_counter_read(&memcg->swap) >
			READ_ONCE(memcg->swap.high);

		/* Don't bother a random interrupted task */
		if (in_interrupt()) {
			if (mem_high) {
				schedule_work(&memcg->high_work);
				break;
			}
			continue;
		}

		if (mem_high || swap_high) {
			/*
			 * The allocating tasks in this cgroup will need to do
			 * reclaim or be throttled to prevent further growth
			 * of the memory or swap footprints.
			 *
			 * Target some best-effort fairness between the tasks,
			 * and distribute reclaim work and delay penalties
			 * based on how much each task is actually allocating.
			 */
			current->memcg_nr_pages_over_high += batch;
			set_notify_resume(current);
			break;
		}
	} while ((memcg = parent_mem_cgroup(memcg)));

	return 0;
}

#if defined(CONFIG_MEMCG_KMEM) || defined(CONFIG_MMU)
static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	if (mem_cgroup_is_root(memcg))
		return;

	page_counter_uncharge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_uncharge(&memcg->memsw, nr_pages);
}
#endif

static void commit_charge(struct page *page, struct mem_cgroup *memcg)
{
	VM_BUG_ON_PAGE(page_memcg(page), page);
	/*
	 * Any of the following ensures page's memcg stability:
	 *
	 * - the page lock
	 * - LRU isolation
	 * - lock_page_memcg()
	 * - exclusive reference
	 */
	page->memcg_data = (unsigned long)memcg;
}

#ifdef CONFIG_MEMCG_KMEM
int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
				 gfp_t gfp, bool new_page)
{
	unsigned int objects = objs_per_slab_page(s, page);
	unsigned long memcg_data;
	void *vec;

	vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
			   page_to_nid(page));
	if (!vec)
		return -ENOMEM;

	memcg_data = (unsigned long) vec | MEMCG_DATA_OBJCGS;
	if (new_page) {
		/*
		 * If the slab page is brand new and nobody can yet access
		 * its memcg_data, no synchronization is required and
		 * memcg_data can be simply assigned.
		 */
		page->memcg_data = memcg_data;
	} else if (cmpxchg(&page->memcg_data, 0, memcg_data)) {
		/*
		 * If the slab page is already in use, somebody can allocate
		 * and assign obj_cgroups in parallel. In this case the existing
		 * objcg vector should be reused.
		 */
		kfree(vec);
		return 0;
	}

	kmemleak_not_leak(vec);
	return 0;
}

/*
 * Returns a pointer to the memory cgroup to which the kernel object is charged.
 *
 * A passed kernel object can be a slab object or a generic kernel page, so
 * different mechanisms for getting the memory cgroup pointer should be used.
 * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller
 * can not know for sure how the kernel object is implemented.
 * mem_cgroup_from_obj() can be safely used in such cases.
 *
 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
 * cgroup_mutex, etc.
 */
struct mem_cgroup *mem_cgroup_from_obj(void *p)
{
	struct page *page;

	if (mem_cgroup_disabled())
		return NULL;

	page = virt_to_head_page(p);

	/*
	 * Slab objects are accounted individually, not per-page.
	 * Memcg membership data for each individual object is saved in
	 * the page->obj_cgroups.
	 */
	if (page_objcgs_check(page)) {
		struct obj_cgroup *objcg;
		unsigned int off;

		off = obj_to_index(page->slab_cache, page, p);
		objcg = page_objcgs(page)[off];
		if (objcg)
			return obj_cgroup_memcg(objcg);

		return NULL;
	}

	/*
	 * page_memcg_check() is used here, because page_has_obj_cgroups()
	 * check above could fail because the object cgroups vector wasn't set
	 * at that moment, but it can be set concurrently.
	 * page_memcg_check(page) will guarantee that a proper memory
	 * cgroup pointer or NULL will be returned.
	 */
	return page_memcg_check(page);
}

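/*
 * Return a referenced obj_cgroup for the current context (the active
 * remote memcg if one is set, otherwise current's memcg), walking up the
 * hierarchy until a live objcg reference can be taken. Returns NULL when
 * kmem accounting is bypassed.
 */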
__always_inline struct obj_cgroup *get_obj_cgroup_from_current(void)
{
	struct obj_cgroup *objcg = NULL;
	struct mem_cgroup *memcg;

	if (memcg_kmem_bypass())
		return NULL;

	rcu_read_lock();
	if (unlikely(active_memcg()))
		memcg = active_memcg();
	else
		memcg = mem_cgroup_from_task(current);

	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
		objcg = rcu_dereference(memcg->objcg);
		if (objcg && obj_cgroup_tryget(objcg))
			break;
		objcg = NULL;
	}
	rcu_read_unlock();

	return objcg;
}

static int memcg_alloc_cache_id(void)
{
	int id, size;
	int err;

	id = ida_simple_get(&memcg_cache_ida,
			    0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
	if (id < 0)
		return id;

	if (id < memcg_nr_cache_ids)
		return id;

	/*
	 * There's no space for the new id in memcg_caches arrays,
	 * so we have to grow them.
	 */
	down_write(&memcg_cache_ids_sem);

	size = 2 * (id + 1);
	if (size < MEMCG_CACHES_MIN_SIZE)
		size = MEMCG_CACHES_MIN_SIZE;
	else if (size > MEMCG_CACHES_MAX_SIZE)
		size = MEMCG_CACHES_MAX_SIZE;

	err = memcg_update_all_list_lrus(size);
	if (!err)
		memcg_nr_cache_ids = size;

	up_write(&memcg_cache_ids_sem);

	if (err) {
		ida_simple_remove(&memcg_cache_ida, id);
		return err;
	}
	return id;
}

static void memcg_free_cache_id(int id)
{
	ida_simple_remove(&memcg_cache_ida, id);
}

/**
 * __memcg_kmem_charge: charge a number of kernel pages to a memcg
 * @memcg: memory cgroup to charge
 * @gfp: reclaim mode
 * @nr_pages: number of pages to charge
 *
 * Returns 0 on success, an error code on failure.
 */
int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
			unsigned int nr_pages)
{
	struct page_counter *counter;
	int ret;

	ret = try_charge(memcg, gfp, nr_pages);
	if (ret)
		return ret;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
	    !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {

		/*
		 * Enforce __GFP_NOFAIL allocation because callers are not
		 * prepared to see failures and likely do not have any failure
		 * handling code.
		 */
		if (gfp & __GFP_NOFAIL) {
			page_counter_charge(&memcg->kmem, nr_pages);
			return 0;
		}
		cancel_charge(memcg, nr_pages);
		return -ENOMEM;
	}
	return 0;
}

/**
 * __memcg_kmem_uncharge: uncharge a number of kernel pages from a memcg
 * @memcg: memcg to uncharge
 * @nr_pages: number of pages to uncharge
 */
void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		page_counter_uncharge(&memcg->kmem, nr_pages);

	refill_stock(memcg, nr_pages);
}

/**
 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
 * @page: page to charge
 * @gfp: reclaim mode
 * @order: allocation order
 *
 * Returns 0 on success, an error code on failure.
 */
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
{
	struct mem_cgroup *memcg;
	int ret = 0;

	memcg = get_mem_cgroup_from_current();
	if (memcg && !mem_cgroup_is_root(memcg)) {
		ret = __memcg_kmem_charge(memcg, gfp, 1 << order);
		if (!ret) {
			page->memcg_data = (unsigned long)memcg |
				MEMCG_DATA_KMEM;
			return 0;
		}
		css_put(&memcg->css);
	}
	return ret;
}

/**
 * __memcg_kmem_uncharge_page: uncharge a kmem page
 * @page: page to uncharge
 * @order: allocation order
 */
void __memcg_kmem_uncharge_page(struct page *page, int order)
{
	struct mem_cgroup *memcg = page_memcg(page);
	unsigned int nr_pages = 1 << order;

	if (!memcg)
		return;

3167
	VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
3168
	__memcg_kmem_uncharge(memcg, nr_pages);
3169
	page->memcg_data = 0;
3170
	css_put(&memcg->css);
3171
}

static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;
	bool ret = false;

	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
	if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
		stock->nr_bytes -= nr_bytes;
		ret = true;
	}

	local_irq_restore(flags);

	return ret;
}

static void drain_obj_stock(struct memcg_stock_pcp *stock)
{
	struct obj_cgroup *old = stock->cached_objcg;

	if (!old)
		return;

	if (stock->nr_bytes) {
		unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
		unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);

		if (nr_pages) {
			rcu_read_lock();
			__memcg_kmem_uncharge(obj_cgroup_memcg(old), nr_pages);
			rcu_read_unlock();
		}

		/*
		 * The leftover is flushed to the centralized per-memcg value.
		 * On the next attempt to refill obj stock it will be moved
		 * to a per-cpu stock (probably, on an other CPU), see
		 * refill_obj_stock().
		 *
		 * How often it's flushed is a trade-off between the memory
		 * limit enforcement accuracy and potential CPU contention,
		 * so it might be changed in the future.
		 */
		atomic_add(nr_bytes, &old->nr_charged_bytes);
		stock->nr_bytes = 0;
	}

	obj_cgroup_put(old);
	stock->cached_objcg = NULL;
}

static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
				     struct mem_cgroup *root_memcg)
{
	struct mem_cgroup *memcg;

	if (stock->cached_objcg) {
		memcg = obj_cgroup_memcg(stock->cached_objcg);
		if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
			return true;
	}

	return false;
}

static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;

	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
	if (stock->cached_objcg != objcg) { /* reset if necessary */
		drain_obj_stock(stock);
		obj_cgroup_get(objcg);
		stock->cached_objcg = objcg;
		stock->nr_bytes = atomic_xchg(&objcg->nr_charged_bytes, 0);
	}
	stock->nr_bytes += nr_bytes;

	if (stock->nr_bytes > PAGE_SIZE)
		drain_obj_stock(stock);

	local_irq_restore(flags);
}

int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
{
	struct mem_cgroup *memcg;
	unsigned int nr_pages, nr_bytes;
	int ret;

	if (consume_obj_stock(objcg, size))
		return 0;

	/*
	 * In theory, memcg->nr_charged_bytes can have enough
	 * pre-charged bytes to satisfy the allocation. However,
	 * flushing memcg->nr_charged_bytes requires two atomic
	 * operations, and memcg->nr_charged_bytes can't be big,
	 * so it's better to ignore it and try grab some new pages.
	 * memcg->nr_charged_bytes will be flushed in
	 * refill_obj_stock(), called from this function or
	 * independently later.
	 */
	rcu_read_lock();
retry:
	memcg = obj_cgroup_memcg(objcg);
	if (unlikely(!css_tryget(&memcg->css)))
		goto retry;
	rcu_read_unlock();

	nr_pages = size >> PAGE_SHIFT;
	nr_bytes = size & (PAGE_SIZE - 1);

	if (nr_bytes)
		nr_pages += 1;

	ret = __memcg_kmem_charge(memcg, gfp, nr_pages);
	if (!ret && nr_bytes)
		refill_obj_stock(objcg, PAGE_SIZE - nr_bytes);

	css_put(&memcg->css);
	return ret;
}

void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
{
	refill_obj_stock(objcg, size);
}

#endif /* CONFIG_MEMCG_KMEM */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * Because page_memcg(head) is not set on compound tails, set it now.
 */
void mem_cgroup_split_huge_fixup(struct page *head)
{
	struct mem_cgroup *memcg = page_memcg(head);
	int i;

	if (mem_cgroup_disabled())
		return;

	for (i = 1; i < HPAGE_PMD_NR; i++) {
		css_get(&memcg->css);
		head[i].memcg_data = (unsigned long)memcg;
	}
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_MEMCG_SWAP
/**
 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
 * @entry: swap entry to be moved
 * @from:  mem_cgroup which the entry is moved from
 * @to:  mem_cgroup which the entry is moved to
 *
 * It succeeds only when the swap_cgroup's record for this entry is the same
 * as the mem_cgroup's id of @from.
 *
 * Returns 0 on success, -EINVAL on failure.
 *
 * The caller must have charged to @to, IOW, called page_counter_charge() for
 * both res and memsw, and called css_get().
 */
static int mem_cgroup_move_swap_account(swp_entry_t entry,
				struct mem_cgroup *from, struct mem_cgroup *to)
{
	unsigned short old_id, new_id;

	old_id = mem_cgroup_id(from);
	new_id = mem_cgroup_id(to);

	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
		mod_memcg_state(from, MEMCG_SWAP, -1);
		mod_memcg_state(to, MEMCG_SWAP, 1);
		return 0;
	}
	return -EINVAL;
}
#else
static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
				struct mem_cgroup *from, struct mem_cgroup *to)
{
	return -EINVAL;
}
#endif

static DEFINE_MUTEX(memcg_max_mutex);

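/*
 * Resize memory.max (or memsw.max when @memsw is true), reclaiming pages
 * as needed until usage fits under the new limit. Returns -EBUSY if
 * reclaim cannot bring usage below @max.
 */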
static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
				 unsigned long max, bool memsw)
{
	bool enlarge = false;
	bool drained = false;
	int ret;
	bool limits_invariant;
	struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;

	do {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		mutex_lock(&memcg_max_mutex);
		/*
		 * Make sure that the new limit (memsw or memory limit) doesn't
		 * break our basic invariant rule memory.max <= memsw.max.
		 */
		limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
					   max <= memcg->memsw.max;
		if (!limits_invariant) {
			mutex_unlock(&memcg_max_mutex);
			ret = -EINVAL;
			break;
		}
		if (max > counter->max)
			enlarge = true;
		ret = page_counter_set_max(counter, max);
		mutex_unlock(&memcg_max_mutex);

		if (!ret)
			break;

		if (!drained) {
			drain_all_stock(memcg);
			drained = true;
			continue;
		}

		if (!try_to_free_mem_cgroup_pages(memcg, 1,
					GFP_KERNEL, !memsw)) {
			ret = -EBUSY;
			break;
		}
	} while (true);

	if (!ret && enlarge)
		memcg_oom_recover(memcg);

	return ret;
}

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	unsigned long nr_reclaimed = 0;
	struct mem_cgroup_per_node *mz, *next_mz = NULL;
	unsigned long reclaimed;
	int loop = 0;
	struct mem_cgroup_tree_per_node *mctz;
	unsigned long excess;
	unsigned long nr_scanned;

	if (order > 0)
		return 0;

	mctz = soft_limit_tree_node(pgdat->node_id);

	/*
	 * Do not even bother to check the largest node if the root
	 * is empty. Do it lockless to prevent lock bouncing. Races
	 * are acceptable as soft limit is best effort anyway.
	 */
	if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
		return 0;

	/*
	 * This loop can run for a while, especially if memory cgroups continuously
	 * keep exceeding their soft limit and putting the system under
	 * pressure
	 */
	do {
		if (next_mz)
			mz = next_mz;
		else
			mz = mem_cgroup_largest_soft_limit_node(mctz);
		if (!mz)
			break;

		nr_scanned = 0;
		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
						    gfp_mask, &nr_scanned);
		nr_reclaimed += reclaimed;
		*total_scanned += nr_scanned;
		spin_lock_irq(&mctz->lock);
		__mem_cgroup_remove_exceeded(mz, mctz);

		/*
		 * If we failed to reclaim anything from this memory cgroup
		 * it is time to move on to the next cgroup
		 */
		next_mz = NULL;
		if (!reclaimed)
			next_mz = __mem_cgroup_largest_soft_limit_node(mctz);

		excess = soft_limit_excess(mz->memcg);
		/*
		 * One school of thought says that we should not add
		 * back the node to the tree if reclaim returns 0.
		 * But our reclaim could return 0, simply because due
		 * to priority we are exposing a smaller subset of
		 * memory to reclaim from. Consider this as a longer
		 * term TODO.
		 */
		/* If excess == 0, no tree ops */
		__mem_cgroup_insert_exceeded(mz, mctz, excess);
		spin_unlock_irq(&mctz->lock);
		css_put(&mz->memcg->css);
		loop++;
		/*
		 * Could not reclaim anything and there are no more
		 * mem cgroups to try or we seem to be looping without
		 * reclaiming anything.
		 */
		if (!nr_reclaimed &&
			(next_mz == NULL ||
			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
			break;
	} while (!nr_reclaimed);
	if (next_mz)
		css_put(&next_mz->memcg->css);
	return nr_reclaimed;
}

/*
 * Reclaims as many pages from the given memcg as possible.
 *
 * Caller is responsible for holding css reference for memcg.
 */
static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
{
	int nr_retries = MAX_RECLAIM_RETRIES;

	/* we call try-to-free pages to make this cgroup empty */
	lru_add_drain_all();

	drain_all_stock(memcg);

	/* try to free all pages in this cgroup */
	while (nr_retries && page_counter_read(&memcg->memory)) {
		int progress;

		if (signal_pending(current))
			return -EINTR;

		progress = try_to_free_mem_cgroup_pages(memcg, 1,
							GFP_KERNEL, true);
		if (!progress) {
			nr_retries--;
			/* maybe some writeback is necessary */
			congestion_wait(BLK_RW_ASYNC, HZ/10);
		}

	}

	return 0;
}

static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
					    char *buf, size_t nbytes,
					    loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));

	if (mem_cgroup_is_root(memcg))
		return -EINVAL;
	return mem_cgroup_force_empty(memcg) ?: nbytes;
}

static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
				     struct cftype *cft)
{
	return 1;
}

static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
				      struct cftype *cft, u64 val)
{
	if (val == 1)
		return 0;

	pr_warn_once("Non-hierarchical mode is deprecated. "
		     "Please report your usecase to linux-mm@kvack.org if you "
		     "depend on this functionality.\n");

	return -EINVAL;
}

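/*
 * Current usage of a memcg in pages. For the root memcg, usage is derived
 * from the memcg state counters (file + anon, plus swap if requested)
 * rather than the page counters.
 */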
static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
{
	unsigned long val;

	if (mem_cgroup_is_root(memcg)) {
		val = memcg_page_state(memcg, NR_FILE_PAGES) +
			memcg_page_state(memcg, NR_ANON_MAPPED);
		if (swap)
			val += memcg_page_state(memcg, MEMCG_SWAP);
	} else {
		if (!swap)
			val = page_counter_read(&memcg->memory);
		else
			val = page_counter_read(&memcg->memsw);
	}
	return val;
}

enum {
	RES_USAGE,
	RES_LIMIT,
	RES_MAX_USAGE,
	RES_FAILCNT,
	RES_SOFT_LIMIT,
};

static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct page_counter *counter;

	switch (MEMFILE_TYPE(cft->private)) {
	case _MEM:
		counter = &memcg->memory;
		break;
	case _MEMSWAP:
		counter = &memcg->memsw;
		break;
	case _KMEM:
		counter = &memcg->kmem;
		break;
	case _TCP:
		counter = &memcg->tcpmem;
		break;
	default:
		BUG();
	}

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_USAGE:
		if (counter == &memcg->memory)
			return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
		if (counter == &memcg->memsw)
			return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
		return (u64)page_counter_read(counter) * PAGE_SIZE;
	case RES_LIMIT:
		return (u64)counter->max * PAGE_SIZE;
	case RES_MAX_USAGE:
		return (u64)counter->watermark * PAGE_SIZE;
	case RES_FAILCNT:
		return counter->failcnt;
	case RES_SOFT_LIMIT:
		return (u64)memcg->soft_limit * PAGE_SIZE;
	default:
		BUG();
	}
}

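/*
 * Fold all remaining per-cpu vmstat deltas of @memcg, including the
 * per-node lruvec counters, into the atomic counters of @memcg and all
 * of its ancestors.
 */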
static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg)
3640
{
3641
	unsigned long stat[MEMCG_NR_STAT] = {0};
3642 3643 3644 3645
	struct mem_cgroup *mi;
	int node, cpu, i;

	for_each_online_cpu(cpu)
3646
		for (i = 0; i < MEMCG_NR_STAT; i++)
3647
			stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu);
3648 3649

	for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
3650
		for (i = 0; i < MEMCG_NR_STAT; i++)
3651 3652 3653 3654 3655 3656
			atomic_long_add(stat[i], &mi->vmstats[i]);

	for_each_node(node) {
		struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
		struct mem_cgroup_per_node *pi;

3657
		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
3658 3659 3660
			stat[i] = 0;

		for_each_online_cpu(cpu)
3661
			for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
3662 3663
				stat[i] += per_cpu(
					pn->lruvec_stat_cpu->count[i], cpu);
3664 3665

		for (pi = pn; pi; pi = parent_nodeinfo(pi, node))
3666
			for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
3667 3668 3669 3670
				atomic_long_add(stat[i], &pi->lruvec_stat[i]);
	}
}

3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681
static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg)
{
	unsigned long events[NR_VM_EVENT_ITEMS];
	struct mem_cgroup *mi;
	int cpu, i;

	for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
		events[i] = 0;

	for_each_online_cpu(cpu)
		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3682 3683
			events[i] += per_cpu(memcg->vmstats_percpu->events[i],
					     cpu);
3684 3685 3686 3687 3688 3689

	for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			atomic_long_add(events[i], &mi->vmevents[i]);
}

#ifdef CONFIG_MEMCG_KMEM
static int memcg_online_kmem(struct mem_cgroup *memcg)
{
	struct obj_cgroup *objcg;
	int memcg_id;

	if (cgroup_memory_nokmem)
		return 0;

	BUG_ON(memcg->kmemcg_id >= 0);
	BUG_ON(memcg->kmem_state);

	memcg_id = memcg_alloc_cache_id();
	if (memcg_id < 0)
		return memcg_id;

	objcg = obj_cgroup_alloc();
	if (!objcg) {
		memcg_free_cache_id(memcg_id);
		return -ENOMEM;
	}
	objcg->memcg = memcg;
	rcu_assign_pointer(memcg->objcg, objcg);

	static_branch_enable(&memcg_kmem_enabled_key);

	memcg->kmemcg_id = memcg_id;
	memcg->kmem_state = KMEM_ONLINE;

	return 0;
}

static void memcg_offline_kmem(struct mem_cgroup *memcg)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *parent, *child;
	int kmemcg_id;

	if (memcg->kmem_state != KMEM_ONLINE)
		return;

	memcg->kmem_state = KMEM_ALLOCATED;

	parent = parent_mem_cgroup(memcg);
	if (!parent)
		parent = root_mem_cgroup;

	memcg_reparent_objcgs(memcg, parent);

	kmemcg_id = memcg->kmemcg_id;
	BUG_ON(kmemcg_id < 0);

	/*
	 * Change kmemcg_id of this cgroup and all its descendants to the
	 * parent's id, and then move all entries from this cgroup's list_lrus
	 * to ones of the parent. After we have finished, all list_lrus
	 * corresponding to this cgroup are guaranteed to remain empty. The
	 * ordering is imposed by list_lru_node->lock taken by
	 * memcg_drain_all_list_lrus().
	 */
	rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
	css_for_each_descendant_pre(css, &memcg->css) {
		child = mem_cgroup_from_css(css);
		BUG_ON(child->kmemcg_id != kmemcg_id);
		child->kmemcg_id = parent->kmemcg_id;
	}
	rcu_read_unlock();

	memcg_drain_all_list_lrus(kmemcg_id, parent);

	memcg_free_cache_id(kmemcg_id);
}

static void memcg_free_kmem(struct mem_cgroup *memcg)
{
	/* css_alloc() failed, offlining didn't happen */
	if (unlikely(memcg->kmem_state == KMEM_ONLINE))
		memcg_offline_kmem(memcg);
}
#else
static int memcg_online_kmem(struct mem_cgroup *memcg)
{
	return 0;
}
static void memcg_offline_kmem(struct mem_cgroup *memcg)
{
}
static void memcg_free_kmem(struct mem_cgroup *memcg)
{
}
#endif /* CONFIG_MEMCG_KMEM */

static int memcg_update_kmem_max(struct mem_cgroup *memcg,
				 unsigned long max)
{
	int ret;

	mutex_lock(&memcg_max_mutex);
	ret = page_counter_set_max(&memcg->kmem, max);
	mutex_unlock(&memcg_max_mutex);
	return ret;
}

static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
{
	int ret;

	mutex_lock(&memcg_max_mutex);

	ret = page_counter_set_max(&memcg->tcpmem, max);
	if (ret)
		goto out;

	if (!memcg->tcpmem_active) {
		/*
		 * The active flag needs to be written after the static_key
		 * update. This is what guarantees that the socket activation
		 * function is the last one to run. See mem_cgroup_sk_alloc()
		 * for details, and note that we don't mark any socket as
		 * belonging to this memcg until that flag is up.
		 *
		 * We need to do this, because static_keys will span multiple
		 * sites, but we can't control their order. If we mark a socket
		 * as accounted, but the accounting functions are not patched in
		 * yet, we'll lose accounting.
		 *
		 * We never race with the readers in mem_cgroup_sk_alloc(),
		 * because when this value changes, the code to process it is
		 * not patched in yet.
		 */
		static_branch_inc(&memcg_sockets_enabled_key);
		memcg->tcpmem_active = true;
	}
out:
	mutex_unlock(&memcg_max_mutex);
	return ret;
}

/*
 * Write handler for the limit files (RES_LIMIT and RES_SOFT_LIMIT).
 */
static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long nr_pages;
	int ret;

	buf = strstrip(buf);
	ret = page_counter_memparse(buf, "-1", &nr_pages);
	if (ret)
		return ret;

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_LIMIT:
		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
			ret = -EINVAL;
			break;
		}
		switch (MEMFILE_TYPE(of_cft(of)->private)) {
		case _MEM:
			ret = mem_cgroup_resize_max(memcg, nr_pages, false);
			break;
		case _MEMSWAP:
			ret = mem_cgroup_resize_max(memcg, nr_pages, true);
			break;
		case _KMEM:
			pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. "
				     "Please report your usecase to linux-mm@kvack.org if you "
				     "depend on this functionality.\n");
			ret = memcg_update_kmem_max(memcg, nr_pages);
			break;
		case _TCP:
			ret = memcg_update_tcp_max(memcg, nr_pages);
			break;
		}
		break;
	case RES_SOFT_LIMIT:
		memcg->soft_limit = nr_pages;
		ret = 0;
		break;
	}
	return ret ?: nbytes;
}

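/*
 * Write handler for the *.max_usage_in_bytes and *.failcnt files: resets
 * either the watermark or the failure counter of the selected page_counter.
 */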
static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
				size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	struct page_counter *counter;

	switch (MEMFILE_TYPE(of_cft(of)->private)) {
	case _MEM:
		counter = &memcg->memory;
		break;
	case _MEMSWAP:
		counter = &memcg->memsw;
		break;
	case _KMEM:
		counter = &memcg->kmem;
		break;
	case _TCP:
		counter = &memcg->tcpmem;
		break;
	default:
		BUG();
	}

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_MAX_USAGE:
		page_counter_reset_watermark(counter);
		break;
	case RES_FAILCNT:
		counter->failcnt = 0;
		break;
	default:
		BUG();
	}

	return nbytes;
}

static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
					struct cftype *cft)
{
	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
}

#ifdef CONFIG_MMU
static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
					struct cftype *cft, u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	if (val & ~MOVE_MASK)
		return -EINVAL;

	/*
	 * No kind of locking is needed in here, because ->can_attach() will
	 * check this value once in the beginning of the process, and then carry
	 * on with stale data. This means that changes to this value will only
	 * affect task migrations starting after the change.
	 */
	memcg->move_charge_at_immigrate = val;
	return 0;
}
#else
static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
					struct cftype *cft, u64 val)
{
	return -ENOSYS;
}
#endif

#ifdef CONFIG_NUMA

#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
#define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)

static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
				int nid, unsigned int lru_mask, bool tree)
{
	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
	unsigned long nr = 0;
	enum lru_list lru;

	VM_BUG_ON((unsigned)nid >= nr_node_ids);

	for_each_lru(lru) {
		if (!(BIT(lru) & lru_mask))
			continue;
		if (tree)
			nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
		else
			nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
	}
	return nr;
}

static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
					     unsigned int lru_mask,
					     bool tree)
{
	unsigned long nr = 0;
	enum lru_list lru;

	for_each_lru(lru) {
		if (!(BIT(lru) & lru_mask))
			continue;
		if (tree)
			nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
		else
			nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
	}
	return nr;
}

static int memcg_numa_stat_show(struct seq_file *m, void *v)
{
	struct numa_stat {
		const char *name;
		unsigned int lru_mask;
	};

	static const struct numa_stat stats[] = {
		{ "total", LRU_ALL },
		{ "file", LRU_ALL_FILE },
		{ "anon", LRU_ALL_ANON },
		{ "unevictable", BIT(LRU_UNEVICTABLE) },
	};
	const struct numa_stat *stat;
	int nid;
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);

	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
		seq_printf(m, "%s=%lu", stat->name,
			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
						   false));
		for_each_node_state(nid, N_MEMORY)
			seq_printf(m, " N%d=%lu", nid,
				   mem_cgroup_node_nr_lru_pages(memcg, nid,
							stat->lru_mask, false));
		seq_putc(m, '\n');
	}

	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {

		seq_printf(m, "hierarchical_%s=%lu", stat->name,
			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
						   true));
		for_each_node_state(nid, N_MEMORY)
			seq_printf(m, " N%d=%lu", nid,
				   mem_cgroup_node_nr_lru_pages(memcg, nid,
							stat->lru_mask, true));
		seq_putc(m, '\n');
	}

	return 0;
}
#endif /* CONFIG_NUMA */

static const unsigned int memcg1_stats[] = {
	NR_FILE_PAGES,
	NR_ANON_MAPPED,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	NR_ANON_THPS,
#endif
	NR_SHMEM,
	NR_FILE_MAPPED,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	MEMCG_SWAP,
};

static const char *const memcg1_stat_names[] = {
	"cache",
	"rss",
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	"rss_huge",
#endif
	"shmem",
	"mapped_file",
	"dirty",
	"writeback",
	"swap",
};

/* Universal VM events cgroup1 shows, original sort order */
static const unsigned int memcg1_events[] = {
	PGPGIN,
	PGPGOUT,
	PGFAULT,
	PGMAJFAULT,
};

static int memcg_stat_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
	unsigned long memory, memsw;
	struct mem_cgroup *mi;
	unsigned int i;

	BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));

	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
		unsigned long nr;

		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
			continue;
		nr = memcg_page_state_local(memcg, memcg1_stats[i]);
		seq_printf(m, "%s %lu\n", memcg1_stat_names[i], nr * PAGE_SIZE);
	}

	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
		seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]),
			   memcg_events_local(memcg, memcg1_events[i]));

	for (i = 0; i < NR_LRU_LISTS; i++)
		seq_printf(m, "%s %lu\n", lru_list_name(i),
			   memcg_page_state_local(memcg, NR_LRU_BASE + i) *
			   PAGE_SIZE);

	/* Hierarchical information */
	memory = memsw = PAGE_COUNTER_MAX;
	for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
		memory = min(memory, READ_ONCE(mi->memory.max));
		memsw = min(memsw, READ_ONCE(mi->memsw.max));
	}
	seq_printf(m, "hierarchical_memory_limit %llu\n",
		   (u64)memory * PAGE_SIZE);
	if (do_memsw_account())
		seq_printf(m, "hierarchical_memsw_limit %llu\n",
			   (u64)memsw * PAGE_SIZE);

	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
		unsigned long nr;

		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
			continue;
		nr = memcg_page_state(memcg, memcg1_stats[i]);
		seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
						(u64)nr * PAGE_SIZE);
	}

	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
		seq_printf(m, "total_%s %llu\n",
			   vm_event_name(memcg1_events[i]),
			   (u64)memcg_events(memcg, memcg1_events[i]));

	for (i = 0; i < NR_LRU_LISTS; i++)
		seq_printf(m, "total_%s %llu\n", lru_list_name(i),
			   (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
			   PAGE_SIZE);

#ifdef CONFIG_DEBUG_VM
	{
		pg_data_t *pgdat;
		struct mem_cgroup_per_node *mz;
		unsigned long anon_cost = 0;
		unsigned long file_cost = 0;

		for_each_online_pgdat(pgdat) {
			mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);

			anon_cost += mz->lruvec.anon_cost;
			file_cost += mz->lruvec.file_cost;
		}
		seq_printf(m, "anon_cost %lu\n", anon_cost);
		seq_printf(m, "file_cost %lu\n", file_cost);
	}
#endif

	return 0;
}

static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	return mem_cgroup_swappiness(memcg);
}

static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	if (val > 100)
		return -EINVAL;

	if (css->parent)
		memcg->swappiness = val;
	else
		vm_swappiness = val;

	return 0;
}

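/*
 * Signal all eventfds whose thresholds lie between the previously recorded
 * position (current_threshold) and the current usage, then remember the new
 * position. Works on either the memory or the memory+swap threshold array
 * of @memcg.
 */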
static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
{
	struct mem_cgroup_threshold_ary *t;
	unsigned long usage;
	int i;

	rcu_read_lock();
	if (!swap)
		t = rcu_dereference(memcg->thresholds.primary);
	else
		t = rcu_dereference(memcg->memsw_thresholds.primary);

	if (!t)
		goto unlock;

	usage = mem_cgroup_usage(memcg, swap);

	/*
	 * current_threshold points to threshold just below or equal to usage.
	 * If it's not true, a threshold was crossed after last
	 * call of __mem_cgroup_threshold().
	 */
	i = t->current_threshold;

	/*
	 * Iterate backward over array of thresholds starting from
	 * current_threshold and check if a threshold is crossed.
	 * If none of thresholds below usage is crossed, we read
	 * only one element of the array here.
	 */
	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
		eventfd_signal(t->entries[i].eventfd, 1);

	/* i = current_threshold + 1 */
	i++;

	/*
	 * Iterate forward over array of thresholds starting from
	 * current_threshold+1 and check if a threshold is crossed.
	 * If none of thresholds above usage is crossed, we read
	 * only one element of the array here.
	 */
	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
		eventfd_signal(t->entries[i].eventfd, 1);

	/* Update current_threshold */
	t->current_threshold = i - 1;
unlock:
	rcu_read_unlock();
}

static void mem_cgroup_threshold(struct mem_cgroup *memcg)
{
	while (memcg) {
		__mem_cgroup_threshold(memcg, false);
		if (do_memsw_account())
			__mem_cgroup_threshold(memcg, true);

		memcg = parent_mem_cgroup(memcg);
	}
}

static int compare_thresholds(const void *a, const void *b)
{
	const struct mem_cgroup_threshold *_a = a;
	const struct mem_cgroup_threshold *_b = b;

	if (_a->threshold > _b->threshold)
		return 1;

	if (_a->threshold < _b->threshold)
		return -1;

	return 0;
}

static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
{
	struct mem_cgroup_eventfd_list *ev;

	spin_lock(&memcg_oom_lock);

	list_for_each_entry(ev, &memcg->oom_notify, list)
		eventfd_signal(ev->eventfd, 1);

	spin_unlock(&memcg_oom_lock);
	return 0;
}

static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg)
		mem_cgroup_oom_notify_cb(iter);
}

static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, const char *args, enum res_type type)
{
	struct mem_cgroup_thresholds *thresholds;
	struct mem_cgroup_threshold_ary *new;
	unsigned long threshold;
	unsigned long usage;
	int i, size, ret;

	ret = page_counter_memparse(args, "-1", &threshold);
	if (ret)
		return ret;

	mutex_lock(&memcg->thresholds_lock);

	if (type == _MEM) {
		thresholds = &memcg->thresholds;
		usage = mem_cgroup_usage(memcg, false);
	} else if (type == _MEMSWAP) {
		thresholds = &memcg->memsw_thresholds;
		usage = mem_cgroup_usage(memcg, true);
	} else
		BUG();

	/* Check if a threshold crossed before adding a new one */
	if (thresholds->primary)
		__mem_cgroup_threshold(memcg, type == _MEMSWAP);

	size = thresholds->primary ? thresholds->primary->size + 1 : 1;

	/* Allocate memory for new array of thresholds */
	new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
	if (!new) {
		ret = -ENOMEM;
		goto unlock;
	}
	new->size = size;

	/* Copy thresholds (if any) to new array */
	if (thresholds->primary)
		memcpy(new->entries, thresholds->primary->entries,
		       flex_array_size(new, entries, size - 1));

	/* Add new threshold */
	new->entries[size - 1].eventfd = eventfd;
	new->entries[size - 1].threshold = threshold;

	/* Sort thresholds. Registering of new threshold isn't time-critical */
	sort(new->entries, size, sizeof(*new->entries),
			compare_thresholds, NULL);

	/* Find current threshold */
	new->current_threshold = -1;
	for (i = 0; i < size; i++) {
		if (new->entries[i].threshold <= usage) {
			/*
			 * new->current_threshold will not be used until
			 * rcu_assign_pointer(), so it's safe to increment
			 * it here.
			 */
			++new->current_threshold;
		} else
			break;
	}

	/* Free old spare buffer and save old primary buffer as spare */
	kfree(thresholds->spare);
	thresholds->spare = thresholds->primary;

	rcu_assign_pointer(thresholds->primary, new);

	/* To be sure that nobody uses thresholds */
	synchronize_rcu();

unlock:
	mutex_unlock(&memcg->thresholds_lock);

	return ret;
}

static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, const char *args)
{
	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
}

static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, const char *args)
{
	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
}

static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, enum res_type type)
{
	struct mem_cgroup_thresholds *thresholds;
	struct mem_cgroup_threshold_ary *new;
	unsigned long usage;
	int i, j, size, entries;

	mutex_lock(&memcg->thresholds_lock);

	if (type == _MEM) {
		thresholds = &memcg->thresholds;
		usage = mem_cgroup_usage(memcg, false);
	} else if (type == _MEMSWAP) {
		thresholds = &memcg->memsw_thresholds;
		usage = mem_cgroup_usage(memcg, true);
	} else
		BUG();

	if (!thresholds->primary)
		goto unlock;

	/* Check if a threshold crossed before removing */
	__mem_cgroup_threshold(memcg, type == _MEMSWAP);

	/* Calculate new number of threshold */
	size = entries = 0;
	for (i = 0; i < thresholds->primary->size; i++) {
		if (thresholds->primary->entries[i].eventfd != eventfd)
			size++;
		else
			entries++;
	}

	new = thresholds->spare;

	/* If no items related to eventfd have been cleared, nothing to do */
	if (!entries)
		goto unlock;

	/* Set thresholds array to NULL if we don't have thresholds */
	if (!size) {
		kfree(new);
		new = NULL;
		goto swap_buffers;
	}

	new->size = size;

	/* Copy thresholds and find current threshold */
	new->current_threshold = -1;
	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
		if (thresholds->primary->entries[i].eventfd == eventfd)
			continue;

		new->entries[j] = thresholds->primary->entries[i];
		if (new->entries[j].threshold <= usage) {
			/*
			 * new->current_threshold will not be used
			 * until rcu_assign_pointer(), so it's safe to increment
			 * it here.
			 */
			++new->current_threshold;
		}
		j++;
	}

swap_buffers:
	/* Swap primary and spare array */
	thresholds->spare = thresholds->primary;

	rcu_assign_pointer(thresholds->primary, new);

	/* To be sure that nobody uses thresholds */
	synchronize_rcu();

	/* If all events are unregistered, free the spare array */
	if (!new) {
		kfree(thresholds->spare);
		thresholds->spare = NULL;
	}
unlock:
	mutex_unlock(&memcg->thresholds_lock);
}

static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd)
{
	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
}

static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd)
{
	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
}

static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, const char *args)
{
	struct mem_cgroup_eventfd_list *event;

	event = kmalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	spin_lock(&memcg_oom_lock);

	event->eventfd = eventfd;
	list_add(&event->list, &memcg->oom_notify);

	/* already in OOM ? */
	if (memcg->under_oom)
		eventfd_signal(eventfd, 1);
	spin_unlock(&memcg_oom_lock);

	return 0;
}

static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd)
{
	struct mem_cgroup_eventfd_list *ev, *tmp;

	spin_lock(&memcg_oom_lock);

	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
		if (ev->eventfd == eventfd) {
			list_del(&ev->list);
			kfree(ev);
		}
	}

	spin_unlock(&memcg_oom_lock);
}

static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);

	seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
	seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
	seq_printf(sf, "oom_kill %lu\n",
		   atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
	return 0;
}

static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
	struct cftype *cft, u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	/* cannot set to root cgroup and only 0 and 1 are allowed */
	if (!css->parent || !((val == 0) || (val == 1)))
		return -EINVAL;

	memcg->oom_kill_disable = val;
	if (!val)
		memcg_oom_recover(memcg);

	return 0;
}

#ifdef CONFIG_CGROUP_WRITEBACK

#include <trace/events/writeback.h>

static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
{
	return wb_domain_init(&memcg->cgwb_domain, gfp);
}

static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
{
	wb_domain_exit(&memcg->cgwb_domain);
}

static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
{
	wb_domain_size_changed(&memcg->cgwb_domain);
}

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);

	if (!memcg->css.parent)
		return NULL;

	return &memcg->cgwb_domain;
}

/*
 * idx can be of type enum memcg_stat_item or node_stat_item.
 * Keep in sync with memcg_exact_page().
 */
static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx)
{
	long x = atomic_long_read(&memcg->vmstats[idx]);
	int cpu;

	for_each_online_cpu(cpu)
		x += per_cpu_ptr(memcg->vmstats_percpu, cpu)->stat[idx];
	if (x < 0)
		x = 0;
	return x;
}

/**
 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
 * @wb: bdi_writeback in question
 * @pfilepages: out parameter for number of file pages
 * @pheadroom: out parameter for number of allocatable pages according to memcg
 * @pdirty: out parameter for number of dirty pages
 * @pwriteback: out parameter for number of pages under writeback
 *
 * Determine the numbers of file, headroom, dirty, and writeback pages in
 * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
 * is a bit more involved.
 *
 * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
 * headroom is calculated as the lowest headroom of itself and the
 * ancestors.  Note that this doesn't consider the actual amount of
 * available memory in the system.  The caller should further cap
 * *@pheadroom accordingly.
 */
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
	struct mem_cgroup *parent;

	*pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY);

	*pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK);
	*pfilepages = memcg_exact_page_state(memcg, NR_INACTIVE_FILE) +
			memcg_exact_page_state(memcg, NR_ACTIVE_FILE);
	*pheadroom = PAGE_COUNTER_MAX;

	while ((parent = parent_mem_cgroup(memcg))) {
		unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
					    READ_ONCE(memcg->memory.high));
		unsigned long used = page_counter_read(&memcg->memory);

		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
		memcg = parent;
	}
}

/*
 * Foreign dirty flushing
 *
 * There's an inherent mismatch between memcg and writeback.  The former
 * tracks ownership per-page while the latter per-inode.  This was a
 * deliberate design decision because honoring per-page ownership in the
 * writeback path is complicated, may lead to higher CPU and IO overheads
 * and deemed unnecessary given that write-sharing an inode across
 * different cgroups isn't a common use-case.
 *
 * Combined with inode majority-writer ownership switching, this works well
 * enough in most cases but there are some pathological cases.  For
 * example, let's say there are two cgroups A and B which keep writing to
 * different but confined parts of the same inode.  B owns the inode and
 * A's memory is limited far below B's.  A's dirty ratio can rise enough to
 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
 * triggering background writeback.  A will be slowed down without a way to
 * make writeback of the dirty pages happen.
 *
 * Conditions like the above can lead to a cgroup getting repeatedly and
 * severely throttled after making some progress after each
 * dirty_expire_interval while the underlying IO device is almost
 * completely idle.
 *
 * Solving this problem completely requires matching the ownership tracking
 * granularities between memcg and writeback in either direction.  However,
 * the more egregious behaviors can be avoided by simply remembering the
 * most recent foreign dirtying events and initiating remote flushes on
 * them when local writeback isn't enough to keep the memory clean enough.
 *
 * The following two functions implement such mechanism.  When a foreign
 * page - a page whose memcg and writeback ownerships don't match - is
 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
 * bdi_writeback on the page owning memcg.  When balance_dirty_pages()
 * decides that the memcg needs to sleep due to high dirty ratio, it calls
 * mem_cgroup_flush_foreign() which queues writeback on the recorded
 * foreign bdi_writebacks which haven't expired.  Both the numbers of
 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
 * limited to MEMCG_CGWB_FRN_CNT.
 *
 * The mechanism only remembers IDs and doesn't hold any object references.
 * As being wrong occasionally doesn't matter, updates and accesses to the
 * records are lockless and racy.
 */
void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
					     struct bdi_writeback *wb)
{
	struct mem_cgroup *memcg = page_memcg(page);
	struct memcg_cgwb_frn *frn;
	u64 now = get_jiffies_64();
	u64 oldest_at = now;
	int oldest = -1;
	int i;

	trace_track_foreign_dirty(page, wb);

	/*
	 * Pick the slot to use.  If there is already a slot for @wb, keep
	 * using it.  If not replace the oldest one which isn't being
	 * written out.
	 */
	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
		frn = &memcg->cgwb_frn[i];
		if (frn->bdi_id == wb->bdi->id &&
		    frn->memcg_id == wb->memcg_css->id)
			break;
		if (time_before64(frn->at, oldest_at) &&
		    atomic_read(&frn->done.cnt) == 1) {
			oldest = i;
			oldest_at = frn->at;
		}
	}

	if (i < MEMCG_CGWB_FRN_CNT) {
		/*
		 * Re-using an existing one.  Update timestamp lazily to
		 * avoid making the cacheline hot.  We want them to be
		 * reasonably up-to-date and significantly shorter than
		 * dirty_expire_interval as that's what expires the record.
		 * Use the shorter of 1s and dirty_expire_interval / 8.
		 */
		unsigned long update_intv =
			min_t(unsigned long, HZ,
			      msecs_to_jiffies(dirty_expire_interval * 10) / 8);

		if (time_before64(frn->at, now - update_intv))
			frn->at = now;
	} else if (oldest >= 0) {
		/* replace the oldest free one */
		frn = &memcg->cgwb_frn[oldest];
		frn->bdi_id = wb->bdi->id;
		frn->memcg_id = wb->memcg_css->id;
		frn->at = now;
	}
}

/* issue foreign writeback flushes for recorded foreign dirtying events */
void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
	unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
	u64 now = jiffies_64;
	int i;

	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
		struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];

		/*
		 * If the record is older than dirty_expire_interval,
		 * writeback on it has already started.  No need to kick it
		 * off again.  Also, don't start a new one if there's
		 * already one in flight.
		 */
		if (time_after64(frn->at, now - intv) &&
		    atomic_read(&frn->done.cnt) == 1) {
			frn->at = 0;
			trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
			cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, 0,
					       WB_REASON_FOREIGN_FLUSH,
					       &frn->done);
		}
	}
}

#else	/* CONFIG_CGROUP_WRITEBACK */

static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
{
	return 0;
}

static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
{
}

static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

/*
 * DO NOT USE IN NEW FILES.
 *
 * "cgroup.event_control" implementation.
 *
 * This is way over-engineered.  It tries to support fully configurable
 * events for each user.  Such level of flexibility is completely
 * unnecessary especially in the light of the planned unified hierarchy.
 *
 * Please deprecate this and replace with something simpler if at all
 * possible.
 */

/*
 * Unregister event and free resources.
 *
 * Gets called from workqueue.
 */
static void memcg_event_remove(struct work_struct *work)
{
	struct mem_cgroup_event *event =
		container_of(work, struct mem_cgroup_event, remove);
	struct mem_cgroup *memcg = event->memcg;

	remove_wait_queue(event->wqh, &event->wait);

	event->unregister_event(memcg, event->eventfd);

	/* Notify userspace the event is going away. */
	eventfd_signal(event->eventfd, 1);

	eventfd_ctx_put(event->eventfd);
	kfree(event);
	css_put(&memcg->css);
}

/*
 * Gets called on EPOLLHUP on eventfd when user closes it.
 *
 * Called with wqh->lock held and interrupts disabled.
 */
static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
			    int sync, void *key)
{
	struct mem_cgroup_event *event =
		container_of(wait, struct mem_cgroup_event, wait);
	struct mem_cgroup *memcg = event->memcg;
	__poll_t flags = key_to_poll(key);

	if (flags & EPOLLHUP) {
		/*
		 * If the event has been detached at cgroup removal, we
		 * can simply return knowing the other side will cleanup
		 * for us.
		 *
		 * We can't race against event freeing since the other
		 * side will require wqh->lock via remove_wait_queue(),
		 * which we hold.
		 */
		spin_lock(&memcg->event_list_lock);
		if (!list_empty(&event->list)) {
			list_del_init(&event->list);
			/*
			 * We are in atomic context, but cgroup_event_remove()
			 * may sleep, so we have to call it in workqueue.
			 */
			schedule_work(&event->remove);
		}
		spin_unlock(&memcg->event_list_lock);
	}

	return 0;
}

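/* poll callback: remember the waitqueue head and queue our wait entry on it */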
static void memcg_event_ptable_queue_proc(struct file *file,
		wait_queue_head_t *wqh, poll_table *pt)
{
	struct mem_cgroup_event *event =
		container_of(pt, struct mem_cgroup_event, pt);

	event->wqh = wqh;
	add_wait_queue(wqh, &event->wait);
}

/*
 * DO NOT USE IN NEW FILES.
 *
 * Parse input and register new cgroup event handler.
 *
 * Input must be in format '<event_fd> <control_fd> <args>'.
 * Interpretation of args is defined by control file implementation.
 */
static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
					 char *buf, size_t nbytes, loff_t off)
{
	struct cgroup_subsys_state *css = of_css(of);
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct mem_cgroup_event *event;
	struct cgroup_subsys_state *cfile_css;
	unsigned int efd, cfd;
	struct fd efile;
	struct fd cfile;
	const char *name;
	char *endp;
	int ret;

	buf = strstrip(buf);

	efd = simple_strtoul(buf, &endp, 10);
	if (*endp != ' ')
		return -EINVAL;
	buf = endp + 1;

	cfd = simple_strtoul(buf, &endp, 10);
	if ((*endp != ' ') && (*endp != '\0'))
		return -EINVAL;
	buf = endp + 1;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	event->memcg = memcg;
	INIT_LIST_HEAD(&event->list);
	init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
	init_waitqueue_func_entry(&event->wait, memcg_event_wake);
	INIT_WORK(&event->remove, memcg_event_remove);

	efile = fdget(efd);
	if (!efile.file) {
		ret = -EBADF;
		goto out_kfree;
	}

	event->eventfd = eventfd_ctx_fileget(efile.file);
	if (IS_ERR(event->eventfd)) {
		ret = PTR_ERR(event->eventfd);
		goto out_put_efile;
	}

	cfile = fdget(cfd);
	if (!cfile.file) {
		ret = -EBADF;
		goto out_put_eventfd;
	}

	/* the process needs read permission on the control file */
	/* AV: shouldn't we check that it's been opened for read instead? */
	ret = file_permission(cfile.file, MAY_READ);
	if (ret < 0)
		goto out_put_cfile;

	/*
	 * Determine the event callbacks and set them in @event.  This used
	 * to be done via struct cftype but cgroup core no longer knows
	 * about these events.  The following is crude but the whole thing
	 * is for compatibility anyway.
	 *
	 * DO NOT ADD NEW FILES.
	 */
	name = cfile.file->f_path.dentry->d_name.name;

	if (!strcmp(name, "memory.usage_in_bytes")) {
		event->register_event = mem_cgroup_usage_register_event;
		event->unregister_event = mem_cgroup_usage_unregister_event;
	} else if (!strcmp(name, "memory.oom_control")) {
		event->register_event = mem_cgroup_oom_register_event;
		event->unregister_event = mem_cgroup_oom_unregister_event;
	} else if (!strcmp(name, "memory.pressure_level")) {
		event->register_event = vmpressure_register_event;
		event->unregister_event = vmpressure_unregister_event;
	} else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
		event->register_event = memsw_cgroup_usage_register_event;
		event->unregister_event = memsw_cgroup_usage_unregister_event;
	} else {
		ret = -EINVAL;
		goto out_put_cfile;
	}

	/*
	 * Verify @cfile should belong to @css.  Also, remaining events are
	 * automatically removed on cgroup destruction but the removal is
	 * asynchronous, so take an extra ref on @css.
	 */
	cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
					       &memory_cgrp_subsys);
	ret = -EINVAL;
	if (IS_ERR(cfile_css))
		goto out_put_cfile;
	if (cfile_css != css) {
		css_put(cfile_css);
		goto out_put_cfile;
	}

	ret = event->register_event(memcg, event->eventfd, buf);
	if (ret)
		goto out_put_css;

	vfs_poll(efile.file, &event->pt);

	spin_lock(&memcg->event_list_lock);
	list_add(&event->list, &memcg->event_list);
	spin_unlock(&memcg->event_list_lock);

	fdput(cfile);
	fdput(efile);

	return nbytes;

out_put_css:
	css_put(css);
out_put_cfile:
	fdput(cfile);
out_put_eventfd:
	eventfd_ctx_put(event->eventfd);
out_put_efile:
	fdput(efile);
out_kfree:
	kfree(event);

	return ret;
}

static struct cftype mem_cgroup_legacy_files[] = {
	{
		.name = "usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "soft_limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "failcnt",
		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "stat",
		.seq_show = memcg_stat_show,
	},
	{
		.name = "force_empty",
		.write = mem_cgroup_force_empty_write,
	},
	{
		.name = "use_hierarchy",
		.write_u64 = mem_cgroup_hierarchy_write,
		.read_u64 = mem_cgroup_hierarchy_read,
	},
	{
		.name = "cgroup.event_control",		/* XXX: for compat */
		.write = memcg_write_event_control,
		.flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
	},
	{
		.name = "swappiness",
		.read_u64 = mem_cgroup_swappiness_read,
		.write_u64 = mem_cgroup_swappiness_write,
	},
	{
		.name = "move_charge_at_immigrate",
		.read_u64 = mem_cgroup_move_charge_read,
		.write_u64 = mem_cgroup_move_charge_write,
	},
	{
		.name = "oom_control",
		.seq_show = mem_cgroup_oom_control_read,
		.write_u64 = mem_cgroup_oom_control_write,
		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
	},
	{
		.name = "pressure_level",
	},
#ifdef CONFIG_NUMA
	{
		.name = "numa_stat",
		.seq_show = memcg_numa_stat_show,
	},
#endif
	{
		.name = "kmem.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "kmem.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "kmem.failcnt",
		.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "kmem.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
#if defined(CONFIG_MEMCG_KMEM) && \
	(defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
	{
		.name = "kmem.slabinfo",
		.seq_show = memcg_slab_show,
	},
#endif
	{
		.name = "kmem.tcp.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "kmem.tcp.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "kmem.tcp.failcnt",
		.private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "kmem.tcp.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{ },	/* terminate */
};

/*
 * Private memory cgroup IDR
 *
 * Swap-out records and page cache shadow entries need to store memcg
 * references in constrained space, so we maintain an ID space that is
 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
 * memory-controlled cgroups to 64k.
 *
 * However, there usually are many references to the offline CSS after
 * the cgroup has been destroyed, such as page cache or reclaimable
 * slab objects, that don't need to hang on to the ID. We want to keep
 * those dead CSS from occupying IDs, or we might quickly exhaust the
 * relatively small ID space and prevent the creation of new cgroups
 * even when there are much fewer than 64k cgroups - possibly none.
 *
 * Maintain a private 16-bit ID space for memcg, and allow the ID to
 * be freed and recycled when it's no longer needed, which is usually
 * when the CSS is offlined.
 *
 * The only exception to that are records of swapped out tmpfs/shmem
 * pages that need to be attributed to live ancestors on swapin. But
 * those references are manageable from userspace.
 */

static DEFINE_IDR(mem_cgroup_idr);

static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
{
	if (memcg->id.id > 0) {
		idr_remove(&mem_cgroup_idr, memcg->id.id);
		memcg->id.id = 0;
	}
}

static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
						  unsigned int n)
{
	refcount_add(n, &memcg->id.ref);
}

static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
{
	if (refcount_sub_and_test(n, &memcg->id.ref)) {
		mem_cgroup_id_remove(memcg);

		/* Memcg ID pins CSS */
		css_put(&memcg->css);
	}
}

static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
{
	mem_cgroup_id_put_many(memcg, 1);
}

/**
 * mem_cgroup_from_id - look up a memcg from a memcg id
 * @id: the memcg id to look up
 *
 * Caller must hold rcu_read_lock().
 */
struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return idr_find(&mem_cgroup_idr, id);
}

static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
{
	struct mem_cgroup_per_node *pn;
	int tmp = node;
	/*
	 * This routine is called against possible nodes.
	 * But it's BUG to call kmalloc() against offline node.
	 *
	 * TODO: this routine can waste much memory for nodes which will
	 *       never be onlined. It's better to use memory hotplug callback
	 *       function.
	 */
	if (!node_state(node, N_NORMAL_MEMORY))
		tmp = -1;
	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
	if (!pn)
		return 1;

	pn->lruvec_stat_local = alloc_percpu_gfp(struct lruvec_stat,
						 GFP_KERNEL_ACCOUNT);
	if (!pn->lruvec_stat_local) {
		kfree(pn);
		return 1;
	}

	pn->lruvec_stat_cpu = alloc_percpu_gfp(struct batched_lruvec_stat,
					       GFP_KERNEL_ACCOUNT);
	if (!pn->lruvec_stat_cpu) {
		free_percpu(pn->lruvec_stat_local);
		kfree(pn);
		return 1;
	}

	lruvec_init(&pn->lruvec);
	pn->usage_in_excess = 0;
	pn->on_tree = false;
	pn->memcg = memcg;

	memcg->nodeinfo[node] = pn;
	return 0;
}

static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
{
	struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];

	if (!pn)
		return;

	free_percpu(pn->lruvec_stat_cpu);
	free_percpu(pn->lruvec_stat_local);
	kfree(pn);
}

static void __mem_cgroup_free(struct mem_cgroup *memcg)
{
	int node;

	for_each_node(node)
		free_mem_cgroup_per_node_info(memcg, node);
	free_percpu(memcg->vmstats_percpu);
	free_percpu(memcg->vmstats_local);
	kfree(memcg);
}

static void mem_cgroup_free(struct mem_cgroup *memcg)
{
	memcg_wb_domain_exit(memcg);
	/*
	 * Flush percpu vmstats and vmevents to guarantee the value correctness
	 * on parent's and all ancestor levels.
	 */
	memcg_flush_percpu_vmstats(memcg);
	memcg_flush_percpu_vmevents(memcg);
	__mem_cgroup_free(memcg);
}

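/*
 * Allocate a new mem_cgroup along with its per-cpu and per-node structures,
 * reserve an ID in the private IDR and initialize the parts that do not
 * depend on the parent cgroup; the rest is done in mem_cgroup_css_alloc().
 */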
static struct mem_cgroup *mem_cgroup_alloc(void)
{
	struct mem_cgroup *memcg;
	unsigned int size;
	int node;
	int __maybe_unused i;
	long error = -ENOMEM;

	size = sizeof(struct mem_cgroup);
	size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);

	memcg = kzalloc(size, GFP_KERNEL);
	if (!memcg)
		return ERR_PTR(error);

	memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
				 1, MEM_CGROUP_ID_MAX,
				 GFP_KERNEL);
	if (memcg->id.id < 0) {
		error = memcg->id.id;
		goto fail;
	}

	memcg->vmstats_local = alloc_percpu_gfp(struct memcg_vmstats_percpu,
						GFP_KERNEL_ACCOUNT);
	if (!memcg->vmstats_local)
		goto fail;

	memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
						 GFP_KERNEL_ACCOUNT);
	if (!memcg->vmstats_percpu)
		goto fail;

	for_each_node(node)
		if (alloc_mem_cgroup_per_node_info(memcg, node))
			goto fail;

	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
		goto fail;

	INIT_WORK(&memcg->high_work, high_work_func);
	INIT_LIST_HEAD(&memcg->oom_notify);
	mutex_init(&memcg->thresholds_lock);
	spin_lock_init(&memcg->move_lock);
	vmpressure_init(&memcg->vmpressure);
	INIT_LIST_HEAD(&memcg->event_list);
	spin_lock_init(&memcg->event_list_lock);
	memcg->socket_pressure = jiffies;
#ifdef CONFIG_MEMCG_KMEM
	memcg->kmemcg_id = -1;
	INIT_LIST_HEAD(&memcg->objcg_list);
#endif
#ifdef CONFIG_CGROUP_WRITEBACK
	INIT_LIST_HEAD(&memcg->cgwb_list);
	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
		memcg->cgwb_frn[i].done =
			__WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
	INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
	memcg->deferred_split_queue.split_queue_len = 0;
#endif
	idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
	return memcg;
fail:
	mem_cgroup_id_remove(memcg);
	__mem_cgroup_free(memcg);
	return ERR_PTR(error);
}

static struct cgroup_subsys_state * __ref
mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
	struct mem_cgroup *memcg, *old_memcg;
	long error = -ENOMEM;

	old_memcg = set_active_memcg(parent);
	memcg = mem_cgroup_alloc();
	set_active_memcg(old_memcg);
	if (IS_ERR(memcg))
		return ERR_CAST(memcg);

	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
	memcg->soft_limit = PAGE_COUNTER_MAX;
	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
	if (parent) {
		memcg->swappiness = mem_cgroup_swappiness(parent);
		memcg->oom_kill_disable = parent->oom_kill_disable;

		page_counter_init(&memcg->memory, &parent->memory);
		page_counter_init(&memcg->swap, &parent->swap);
		page_counter_init(&memcg->kmem, &parent->kmem);
		page_counter_init(&memcg->tcpmem, &parent->tcpmem);
	} else {
		page_counter_init(&memcg->memory, NULL);
		page_counter_init(&memcg->swap, NULL);
		page_counter_init(&memcg->kmem, NULL);
		page_counter_init(&memcg->tcpmem, NULL);

		root_mem_cgroup = memcg;
		return &memcg->css;
	}
	/* The following stuff does not apply to the root */
	error = memcg_online_kmem(memcg);
	if (error)
		goto fail;

	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
		static_branch_inc(&memcg_sockets_enabled_key);

	return &memcg->css;
fail:
	mem_cgroup_id_remove(memcg);
	mem_cgroup_free(memcg);
	return ERR_PTR(error);
}

static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	/*
	 * A memcg must be visible for memcg_expand_shrinker_maps()
	 * by the time the maps are allocated. So, we allocate maps
	 * here, when for_each_mem_cgroup() can't skip it.
	 */
	if (memcg_alloc_shrinker_maps(memcg)) {
		mem_cgroup_id_remove(memcg);
		return -ENOMEM;
	}

	/* Online state pins memcg ID, memcg ID pins CSS */
	refcount_set(&memcg->id.ref, 1);
	css_get(css);
	return 0;
}

static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct mem_cgroup_event *event, *tmp;

	/*
	 * Unregister events and notify userspace.
	 * Notify userspace about cgroup removing only after rmdir of cgroup
	 * directory to avoid race between userspace and kernelspace.
	 */
	spin_lock(&memcg->event_list_lock);
	list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
		list_del_init(&event->list);
		schedule_work(&event->remove);
	}
	spin_unlock(&memcg->event_list_lock);

	page_counter_set_min(&memcg->memory, 0);
	page_counter_set_low(&memcg->memory, 0);

	memcg_offline_kmem(memcg);
	wb_memcg_offline(memcg);

	drain_all_stock(memcg);

	mem_cgroup_id_put(memcg);
}

static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	invalidate_reclaim_iterators(memcg);
}

static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	int __maybe_unused i;

#ifdef CONFIG_CGROUP_WRITEBACK
	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
		wb_wait_for_completion(&memcg->cgwb_frn[i].done);
#endif
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
		static_branch_dec(&memcg_sockets_enabled_key);

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
		static_branch_dec(&memcg_sockets_enabled_key);

	vmpressure_cleanup(&memcg->vmpressure);
	cancel_work_sync(&memcg->high_work);
	mem_cgroup_remove_from_trees(memcg);
	memcg_free_shrinker_maps(memcg);
	memcg_free_kmem(memcg);
	mem_cgroup_free(memcg);
}

/**
 * mem_cgroup_css_reset - reset the states of a mem_cgroup
 * @css: the target css
 *
 * Reset the states of the mem_cgroup associated with @css.  This is
 * invoked when the userland requests disabling on the default hierarchy
 * but the memcg is pinned through dependency.  The memcg should stop
 * applying policies and should revert to the vanilla state as it may be
 * made visible again.
 *
 * The current implementation only resets the essential configurations.
 * This needs to be expanded to cover all the visible parts.
 */
static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
	page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
	page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
	page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
	page_counter_set_min(&memcg->memory, 0);
	page_counter_set_low(&memcg->memory, 0);
	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
	memcg->soft_limit = PAGE_COUNTER_MAX;
	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
	memcg_wb_domain_size_changed(memcg);
}

#ifdef CONFIG_MMU
/* Handlers for move charge at task migration. */
static int mem_cgroup_do_precharge(unsigned long count)
{
	int ret;

	/* Try a single bulk charge without reclaim first, kswapd may wake */
	ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
	if (!ret) {
		mc.precharge += count;
		return ret;
	}

	/* Try charges one by one with reclaim, but do not retry */
	while (count--) {
		ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
		if (ret)
			return ret;
		mc.precharge++;
		cond_resched();
	}
	return 0;
}

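/*
 * A move-charge target found during the page table walk: either a page
 * with an extra reference held, or a swap entry. MC_TARGET_DEVICE marks
 * device private (ZONE_DEVICE) pages, which are otherwise handled like
 * MC_TARGET_PAGE.
 */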
union mc_target {
	struct page	*page;
	swp_entry_t	ent;
};

enum mc_target_type {
	MC_TARGET_NONE = 0,
	MC_TARGET_PAGE,
	MC_TARGET_SWAP,
	MC_TARGET_DEVICE,
};

static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
						unsigned long addr, pte_t ptent)
{
	struct page *page = vm_normal_page(vma, addr, ptent);

	if (!page || !page_mapped(page))
		return NULL;
	if (PageAnon(page)) {
		if (!(mc.flags & MOVE_ANON))
			return NULL;
	} else {
		if (!(mc.flags & MOVE_FILE))
			return NULL;
	}
	if (!get_page_unless_zero(page))
		return NULL;

	return page;
}

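/*
 * Resolve a swap pte for move-charge: return the page from the swap
 * cache, or the device private page that a MEMORY_DEVICE_PRIVATE entry
 * points to, with a reference taken in either case.
 */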
#if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
			pte_t ptent, swp_entry_t *entry)
{
	struct page *page = NULL;
	swp_entry_t ent = pte_to_swp_entry(ptent);

	if (!(mc.flags & MOVE_ANON))
		return NULL;

	/*
	 * Handle MEMORY_DEVICE_PRIVATE, i.e. ZONE_DEVICE pages belonging to
	 * a device. Because they are not accessible by the CPU, they are
	 * stored as special swap entries in the CPU page table.
	 */
	if (is_device_private_entry(ent)) {
		page = device_private_entry_to_page(ent);
		/*
		 * MEMORY_DEVICE_PRIVATE means a ZONE_DEVICE page, which has
		 * a refcount of 1 when free (unlike a normal page).
		 */
		if (!page_ref_add_unless(page, 1, 1))
			return NULL;
		return page;
	}

	if (non_swap_entry(ent))
		return NULL;

	/*
	 * Because lookup_swap_cache() updates some statistics counter,
	 * we call find_get_page() with swapper_space directly.
	 */
	page = find_get_page(swap_address_space(ent), swp_offset(ent));
	entry->val = ent.val;

	return page;
}
#else
static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
			pte_t ptent, swp_entry_t *entry)
{
	return NULL;
}
#endif

static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	if (!vma->vm_file) /* anonymous vma */
		return NULL;
	if (!(mc.flags & MOVE_FILE))
		return NULL;

	/* page is moved even if it's not RSS of this task(page-faulted). */
	/* shmem/tmpfs may report page out on swap: account for that too. */
	return find_get_incore_page(vma->vm_file->f_mapping,
			linear_page_index(vma, addr));
}

/**
 * mem_cgroup_move_account - move account of the page
 * @page: the page
 * @compound: charge the page as compound or small page
 * @from: mem_cgroup which the page is moved from.
 * @to:	mem_cgroup which the page is moved to. @from != @to.
 *
 * The caller must make sure the page is not on LRU (isolate_lru_page() is
 * useful.)
 *
 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
 * from old cgroup.
 */
static int mem_cgroup_move_account(struct page *page,
				   bool compound,
				   struct mem_cgroup *from,
				   struct mem_cgroup *to)
{
	struct lruvec *from_vec, *to_vec;
	struct pglist_data *pgdat;
	unsigned int nr_pages = compound ? thp_nr_pages(page) : 1;
	int ret;

	VM_BUG_ON(from == to);
	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON(compound && !PageTransHuge(page));

	/*
	 * Prevent mem_cgroup_migrate() from looking at the source page's
	 * memory cgroup while we change it.
	 */
	ret = -EBUSY;
	if (!trylock_page(page))
		goto out;

	ret = -EINVAL;
	if (page_memcg(page) != from)
		goto out_unlock;

	pgdat = page_pgdat(page);
	from_vec = mem_cgroup_lruvec(from, pgdat);
	to_vec = mem_cgroup_lruvec(to, pgdat);

	lock_page_memcg(page);

	if (PageAnon(page)) {
		if (page_mapped(page)) {
			__mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
			__mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
			if (PageTransHuge(page)) {
				__mod_lruvec_state(from_vec, NR_ANON_THPS,
						   -nr_pages);
				__mod_lruvec_state(to_vec, NR_ANON_THPS,
						   nr_pages);
			}
		}
	} else {
		__mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
		__mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);

		if (PageSwapBacked(page)) {
			__mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
			__mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
		}

		if (page_mapped(page)) {
			__mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
			__mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
		}

		if (PageDirty(page)) {
			struct address_space *mapping = page_mapping(page);

			if (mapping_can_writeback(mapping)) {
				__mod_lruvec_state(from_vec, NR_FILE_DIRTY,
						   -nr_pages);
				__mod_lruvec_state(to_vec, NR_FILE_DIRTY,
						   nr_pages);
			}
		}
	}

	if (PageWriteback(page)) {
		__mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
		__mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
	}

	/*
	 * All state has been migrated, let's switch to the new memcg.
	 *
	 * It is safe to change page's memcg here because the page
	 * is referenced, charged, isolated, and locked: we can't race
	 * with (un)charging, migration, LRU putback, or anything else
	 * that would rely on a stable page's memory cgroup.
	 *
	 * Note that lock_page_memcg is a memcg lock, not a page lock,
	 * to save space. As soon as we switch page's memory cgroup to a
	 * new memcg that isn't locked, the above state can change
	 * concurrently again. Make sure we're truly done with it.
	 */
	smp_mb();

	css_get(&to->css);
	css_put(&from->css);

	page->memcg_data = (unsigned long)to;

	__unlock_page_memcg(from);

	ret = 0;

	local_irq_disable();
	mem_cgroup_charge_statistics(to, page, nr_pages);
	memcg_check_events(to, page);
	mem_cgroup_charge_statistics(from, page, -nr_pages);
	memcg_check_events(from, page);
	local_irq_enable();
out_unlock:
	unlock_page(page);
out:
	return ret;
}

/**
 * get_mctgt_type - get target type of moving charge
 * @vma: the vma the pte to be checked belongs
 * @addr: the address corresponding to the pte to be checked
 * @ptent: the pte to be checked
 * @target: the pointer where the target page or swap entry will be stored
 *          (can be NULL)
 *
 * Returns
 *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
 *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
 *     move charge. if @target is not NULL, the page is stored in target->page
 *     with an extra refcount taken (callers should handle it).
 *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
 *     target for charge migration. if @target is not NULL, the entry is stored
 *     in target->ent.
 *   3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but page is MEMORY_DEVICE_PRIVATE
 *     (so a ZONE_DEVICE page and thus not on the lru).
 *     For now such a page is charged like a regular page would be, as for all
 *     intents and purposes it is just special memory taking the place of a
 *     regular page.
 *
 *     See Documentation/vm/hmm.txt and include/linux/hmm.h
 *
 * Called with pte lock held.
 */

static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
		unsigned long addr, pte_t ptent, union mc_target *target)
{
	struct page *page = NULL;
	enum mc_target_type ret = MC_TARGET_NONE;
	swp_entry_t ent = { .val = 0 };

	if (pte_present(ptent))
		page = mc_handle_present_pte(vma, addr, ptent);
	else if (is_swap_pte(ptent))
		page = mc_handle_swap_pte(vma, ptent, &ent);
	else if (pte_none(ptent))
		page = mc_handle_file_pte(vma, addr, ptent, &ent);

	if (!page && !ent.val)
		return ret;
	if (page) {
		/*
		 * Do only loose check w/o serialization.
		 * mem_cgroup_move_account() checks the page is valid or
		 * not under LRU exclusion.
		 */
		if (page_memcg(page) == mc.from) {
			ret = MC_TARGET_PAGE;
			if (is_device_private_page(page))
				ret = MC_TARGET_DEVICE;
			if (target)
				target->page = page;
		}
		if (!ret || !target)
			put_page(page);
	}
	/*
	 * There is a swap entry and a page doesn't exist or isn't charged.
	 * But we cannot move a tail-page in a THP.
	 */
	if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
	    mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
		ret = MC_TARGET_SWAP;
		if (target)
			target->ent = ent;
	}
	return ret;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * We don't consider PMD mapped swapping or file mapped pages because THP does
 * not support them for now.
 * Caller should make sure that pmd_trans_huge(pmd) is true.
 */
static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
		unsigned long addr, pmd_t pmd, union mc_target *target)
{
	struct page *page = NULL;
	enum mc_target_type ret = MC_TARGET_NONE;

	if (unlikely(is_swap_pmd(pmd))) {
		VM_BUG_ON(thp_migration_supported() &&
				  !is_pmd_migration_entry(pmd));
		return ret;
	}
	page = pmd_page(pmd);
	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
	if (!(mc.flags & MOVE_ANON))
		return ret;
	if (page_memcg(page) == mc.from) {
		ret = MC_TARGET_PAGE;
		if (target) {
			get_page(page);
			target->page = page;
		}
	}
	return ret;
}
#else
static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
		unsigned long addr, pmd_t pmd, union mc_target *target)
{
	return MC_TARGET_NONE;
}
#endif

static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
					unsigned long addr, unsigned long end,
					struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		/*
		 * Note there cannot be MC_TARGET_DEVICE for now as we do not
		 * support transparent huge page with MEMORY_DEVICE_PRIVATE but
		 * this might change.
		 */
		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
			mc.precharge += HPAGE_PMD_NR;
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		if (get_mctgt_type(vma, addr, *pte, NULL))
			mc.precharge++;	/* increment precharge temporarily */
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	return 0;
}

static const struct mm_walk_ops precharge_walk_ops = {
	.pmd_entry	= mem_cgroup_count_precharge_pte_range,
};

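/*
 * First pass of the charge-moving protocol: walk the whole address space
 * and count how many charges would have to be moved, so that they can be
 * precharged to mc.to before any page is actually switched over.
 */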
static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
{
	unsigned long precharge;

	mmap_read_lock(mm);
	walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL);
	mmap_read_unlock(mm);

	precharge = mc.precharge;
	mc.precharge = 0;

	return precharge;
}

static int mem_cgroup_precharge_mc(struct mm_struct *mm)
{
	unsigned long precharge = mem_cgroup_count_precharge(mm);

	VM_BUG_ON(mc.moving_task);
	mc.moving_task = current;
	return mem_cgroup_do_precharge(precharge);
}

/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
static void __mem_cgroup_clear_mc(void)
{
	struct mem_cgroup *from = mc.from;
	struct mem_cgroup *to = mc.to;

	/* we must uncharge all the leftover precharges from mc.to */
	if (mc.precharge) {
		cancel_charge(mc.to, mc.precharge);
		mc.precharge = 0;
	}
	/*
	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
	 * we must uncharge here.
	 */
	if (mc.moved_charge) {
		cancel_charge(mc.from, mc.moved_charge);
		mc.moved_charge = 0;
	}
	/* we must fixup refcnts and charges */
	if (mc.moved_swap) {
		/* uncharge swap account from the old cgroup */
		if (!mem_cgroup_is_root(mc.from))
			page_counter_uncharge(&mc.from->memsw, mc.moved_swap);

		mem_cgroup_id_put_many(mc.from, mc.moved_swap);

		/*
		 * we charged both to->memory and to->memsw, so we
		 * should uncharge to->memory.
		 */
		if (!mem_cgroup_is_root(mc.to))
			page_counter_uncharge(&mc.to->memory, mc.moved_swap);

		mc.moved_swap = 0;
	}
	memcg_oom_recover(from);
	memcg_oom_recover(to);
	wake_up_all(&mc.waitq);
}

static void mem_cgroup_clear_mc(void)
{
	struct mm_struct *mm = mc.mm;

	/*
	 * we must clear moving_task before waking up waiters at the end of
	 * task migration.
	 */
	mc.moving_task = NULL;
	__mem_cgroup_clear_mc();
	spin_lock(&mc.lock);
	mc.from = NULL;
	mc.to = NULL;
	mc.mm = NULL;
	spin_unlock(&mc.lock);

	mmput(mm);
}

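/*
 * Set up the move-charge state when a task is migrated into a cgroup
 * that has memory.move_charge_at_immigrate enabled (cgroup v1 only).
 * For example - illustrative, assuming the usual MOVE_ANON|MOVE_FILE
 * bit layout - writing 3 to memory.move_charge_at_immigrate asks for
 * both anonymous and file charges to follow the task.
 */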
static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
	struct mem_cgroup *from;
	struct task_struct *leader, *p;
	struct mm_struct *mm;
	unsigned long move_flags;
	int ret = 0;

	/* charge immigration isn't supported on the default hierarchy */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return 0;

	/*
	 * Multi-process migrations only happen on the default hierarchy
	 * where charge immigration is not used.  Perform charge
	 * immigration if @tset contains a leader and whine if there are
	 * multiple.
	 */
	p = NULL;
	cgroup_taskset_for_each_leader(leader, css, tset) {
		WARN_ON_ONCE(p);
		p = leader;
		memcg = mem_cgroup_from_css(css);
	}
	if (!p)
		return 0;

	/*
	 * We are now committed to this value whatever it is. Changes in this
	 * tunable will only affect upcoming migrations, not the current one.
	 * So we need to save it, and keep it going.
	 */
	move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
	if (!move_flags)
		return 0;

	from = mem_cgroup_from_task(p);

	VM_BUG_ON(from == memcg);

	mm = get_task_mm(p);
	if (!mm)
		return 0;
	/* We move charges only when we move an owner of the mm */
	if (mm->owner == p) {
		VM_BUG_ON(mc.from);
		VM_BUG_ON(mc.to);
		VM_BUG_ON(mc.precharge);
		VM_BUG_ON(mc.moved_charge);
		VM_BUG_ON(mc.moved_swap);

		spin_lock(&mc.lock);
		mc.mm = mm;
		mc.from = from;
		mc.to = memcg;
		mc.flags = move_flags;
		spin_unlock(&mc.lock);
		/* We set mc.moving_task later */

		ret = mem_cgroup_precharge_mc(mm);
		if (ret)
			mem_cgroup_clear_mc();
	} else {
		mmput(mm);
	}
	return ret;
}

static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
{
	if (mc.to)
		mem_cgroup_clear_mc();
}

static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mm_walk *walk)
{
	int ret = 0;
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte;
	spinlock_t *ptl;
	enum mc_target_type target_type;
	union mc_target target;
	struct page *page;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		if (mc.precharge < HPAGE_PMD_NR) {
			spin_unlock(ptl);
			return 0;
		}
		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
		if (target_type == MC_TARGET_PAGE) {
			page = target.page;
			if (!isolate_lru_page(page)) {
				if (!mem_cgroup_move_account(page, true,
							     mc.from, mc.to)) {
					mc.precharge -= HPAGE_PMD_NR;
					mc.moved_charge += HPAGE_PMD_NR;
				}
				putback_lru_page(page);
			}
			put_page(page);
		} else if (target_type == MC_TARGET_DEVICE) {
			page = target.page;
			if (!mem_cgroup_move_account(page, true,
						     mc.from, mc.to)) {
				mc.precharge -= HPAGE_PMD_NR;
				mc.moved_charge += HPAGE_PMD_NR;
			}
			put_page(page);
		}
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
retry:
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; addr += PAGE_SIZE) {
		pte_t ptent = *(pte++);
		bool device = false;
		swp_entry_t ent;

		if (!mc.precharge)
			break;

		switch (get_mctgt_type(vma, addr, ptent, &target)) {
		case MC_TARGET_DEVICE:
			device = true;
			fallthrough;
		case MC_TARGET_PAGE:
			page = target.page;
			/*
			 * We can have a part of the split pmd here. Moving it
			 * can be done but it would be too convoluted so simply
			 * ignore such a partial THP and keep it in original
			 * memcg. There should be somebody mapping the head.
			 */
			if (PageTransCompound(page))
				goto put;
			if (!device && isolate_lru_page(page))
				goto put;
			if (!mem_cgroup_move_account(page, false,
						mc.from, mc.to)) {
				mc.precharge--;
				/* we uncharge from mc.from later. */
				mc.moved_charge++;
			}
			if (!device)
				putback_lru_page(page);
put:			/* get_mctgt_type() gets the page */
			put_page(page);
			break;
		case MC_TARGET_SWAP:
			ent = target.ent;
			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
				mc.precharge--;
				mem_cgroup_id_get_many(mc.to, 1);
				/* we fixup other refcnts and charges later. */
				mc.moved_swap++;
			}
			break;
		default:
			break;
		}
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	if (addr != end) {
		/*
		 * We have consumed all precharges we got in can_attach().
		 * We try charge one by one, but don't do any additional
		 * charges to mc.to if we have failed in charge once in attach()
		 * phase.
		 */
		ret = mem_cgroup_do_precharge(1);
		if (!ret)
			goto retry;
	}

	return ret;
}

static const struct mm_walk_ops charge_walk_ops = {
	.pmd_entry	= mem_cgroup_move_charge_pte_range,
};

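/*
 * Second pass: walk the mm again and transfer the precharged pages and
 * swap entries from mc.from to mc.to, after signalling lock_page_memcg()
 * to take mc.from's move_lock so that concurrent stat updates see a
 * stable page->memcg binding.
 */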
static void mem_cgroup_move_charge(void)
{
	lru_add_drain_all();
	/*
	 * Signal lock_page_memcg() to take the memcg's move_lock
	 * while we're moving its pages to another memcg. Then wait
	 * for already started RCU-only updates to finish.
	 */
	atomic_inc(&mc.from->moving_account);
	synchronize_rcu();
retry:
	if (unlikely(!mmap_read_trylock(mc.mm))) {
		/*
		 * Someone who is holding the mmap_lock might be waiting in
		 * waitq. So we cancel all extra charges, wake up all waiters,
		 * and retry. Because we cancel precharges, we might not be able
		 * to move enough charges, but moving charge is a best-effort
		 * feature anyway, so it wouldn't be a big problem.
		 */
		__mem_cgroup_clear_mc();
		cond_resched();
		goto retry;
	}
	/*
	 * When we have consumed all precharges and failed in doing
	 * additional charge, the page walk just aborts.
	 */
	walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops,
			NULL);

	mmap_read_unlock(mc.mm);
	atomic_dec(&mc.from->moving_account);
}

static void mem_cgroup_move_task(void)
{
	if (mc.to) {
		mem_cgroup_move_charge();
		mem_cgroup_clear_mc();
	}
}
#else	/* !CONFIG_MMU */
static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
{
	return 0;
}
static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
{
}
static void mem_cgroup_move_task(void)
{
}
#endif

static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
{
	if (value == PAGE_COUNTER_MAX)
		seq_puts(m, "max\n");
	else
		seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);

	return 0;
}

static u64 memory_current_read(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
}

static int memory_min_show(struct seq_file *m, void *v)
{
	return seq_puts_memcg_tunable(m,
		READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
}

static ssize_t memory_min_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long min;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &min);
	if (err)
		return err;

	page_counter_set_min(&memcg->memory, min);

	return nbytes;
}

static int memory_low_show(struct seq_file *m, void *v)
{
	return seq_puts_memcg_tunable(m,
		READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
}

static ssize_t memory_low_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long low;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &low);
	if (err)
		return err;

	page_counter_set_low(&memcg->memory, low);

	return nbytes;
}

static int memory_high_show(struct seq_file *m, void *v)
{
	return seq_puts_memcg_tunable(m,
		READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
}

static ssize_t memory_high_write(struct kernfs_open_file *of,
				 char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
	bool drained = false;
	unsigned long high;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &high);
	if (err)
		return err;

	page_counter_set_high(&memcg->memory, high);

	for (;;) {
		unsigned long nr_pages = page_counter_read(&memcg->memory);
		unsigned long reclaimed;

		if (nr_pages <= high)
			break;

		if (signal_pending(current))
			break;

		if (!drained) {
			drain_all_stock(memcg);
			drained = true;
			continue;
		}

		reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
							 GFP_KERNEL, true);

		if (!reclaimed && !nr_retries--)
			break;
	}

	memcg_wb_domain_size_changed(memcg);
	return nbytes;
}

static int memory_max_show(struct seq_file *m, void *v)
{
	return seq_puts_memcg_tunable(m,
		READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
}

static ssize_t memory_max_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
	bool drained = false;
	unsigned long max;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &max);
	if (err)
		return err;

	xchg(&memcg->memory.max, max);

	for (;;) {
		unsigned long nr_pages = page_counter_read(&memcg->memory);

		if (nr_pages <= max)
			break;

		if (signal_pending(current))
			break;

		if (!drained) {
			drain_all_stock(memcg);
			drained = true;
			continue;
		}

		if (nr_reclaims) {
			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
							  GFP_KERNEL, true))
				nr_reclaims--;
			continue;
		}

		memcg_memory_event(memcg, MEMCG_OOM);
		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
			break;
	}

	memcg_wb_domain_size_changed(memcg);
	return nbytes;
}

static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
{
	seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
	seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
	seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
	seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
	seq_printf(m, "oom_kill %lu\n",
		   atomic_long_read(&events[MEMCG_OOM_KILL]));
}

static int memory_events_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);

	__memory_events_show(m, memcg->memory_events);
	return 0;
}

static int memory_events_local_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);

	__memory_events_show(m, memcg->memory_events_local);
	return 0;
}

static int memory_stat_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
	char *buf;

	buf = memory_stat_format(memcg);
	if (!buf)
		return -ENOMEM;
	seq_puts(m, buf);
	kfree(buf);
	return 0;
}

#ifdef CONFIG_NUMA
static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
						     int item)
{
	return lruvec_page_state(lruvec, item) * memcg_page_state_unit(item);
}

static int memory_numa_stat_show(struct seq_file *m, void *v)
{
	int i;
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);

	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
		int nid;

		if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
			continue;

		seq_printf(m, "%s", memory_stats[i].name);
		for_each_node_state(nid, N_MEMORY) {
			u64 size;
			struct lruvec *lruvec;

			lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
			size = lruvec_page_state_output(lruvec,
							memory_stats[i].idx);
			seq_printf(m, " N%d=%llu", nid, size);
		}
		seq_putc(m, '\n');
	}

	return 0;
}
#endif

static int memory_oom_group_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);

	seq_printf(m, "%d\n", memcg->oom_group);

	return 0;
}

static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
				      char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	int ret, oom_group;

	buf = strstrip(buf);
	if (!buf)
		return -EINVAL;

	ret = kstrtoint(buf, 0, &oom_group);
	if (ret)
		return ret;

	if (oom_group != 0 && oom_group != 1)
		return -EINVAL;

	memcg->oom_group = oom_group;

	return nbytes;
}

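/*
 * Control files exposed on the default (v2) hierarchy. Each cgroup
 * directory gets memory.current, memory.min, memory.low, memory.high,
 * memory.max, memory.events, memory.events.local, memory.stat,
 * memory.numa_stat (CONFIG_NUMA) and memory.oom.group. For example
 * (illustrative): echo 512M > /sys/fs/cgroup/<group>/memory.high is
 * handled by memory_high_write() above.
 */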
static struct cftype memory_files[] = {
	{
		.name = "current",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = memory_current_read,
	},
	{
		.name = "min",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_min_show,
		.write = memory_min_write,
	},
	{
		.name = "low",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_low_show,
		.write = memory_low_write,
	},
	{
		.name = "high",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_high_show,
		.write = memory_high_write,
	},
	{
		.name = "max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_max_show,
		.write = memory_max_write,
	},
	{
		.name = "events",
		.flags = CFTYPE_NOT_ON_ROOT,
		.file_offset = offsetof(struct mem_cgroup, events_file),
		.seq_show = memory_events_show,
	},
	{
		.name = "events.local",
		.flags = CFTYPE_NOT_ON_ROOT,
		.file_offset = offsetof(struct mem_cgroup, events_local_file),
		.seq_show = memory_events_local_show,
	},
	{
		.name = "stat",
		.seq_show = memory_stat_show,
	},
#ifdef CONFIG_NUMA
	{
		.name = "numa_stat",
		.seq_show = memory_numa_stat_show,
	},
#endif
	{
		.name = "oom.group",
		.flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
		.seq_show = memory_oom_group_show,
		.write = memory_oom_group_write,
	},
	{ }	/* terminate */
};

struct cgroup_subsys memory_cgrp_subsys = {
	.css_alloc = mem_cgroup_css_alloc,
	.css_online = mem_cgroup_css_online,
	.css_offline = mem_cgroup_css_offline,
	.css_released = mem_cgroup_css_released,
	.css_free = mem_cgroup_css_free,
	.css_reset = mem_cgroup_css_reset,
	.can_attach = mem_cgroup_can_attach,
	.cancel_attach = mem_cgroup_cancel_attach,
	.post_attach = mem_cgroup_move_task,
	.dfl_cftypes = memory_files,
	.legacy_cftypes = mem_cgroup_legacy_files,
	.early_init = 0,
};

/*
 * This function calculates an individual cgroup's effective
 * protection which is derived from its own memory.min/low, its
 * parent's and siblings' settings, as well as the actual memory
 * distribution in the tree.
 *
 * The following rules apply to the effective protection values:
 *
 * 1. At the first level of reclaim, effective protection is equal to
 *    the declared protection in memory.min and memory.low.
 *
 * 2. To enable safe delegation of the protection configuration, at
 *    subsequent levels the effective protection is capped to the
 *    parent's effective protection.
 *
 * 3. To make complex and dynamic subtrees easier to configure, the
 *    user is allowed to overcommit the declared protection at a given
 *    level. If that is the case, the parent's effective protection is
 *    distributed to the children in proportion to how much protection
 *    they have declared and how much of it they are utilizing.
 *
 *    This makes distribution proportional, but also work-conserving:
 *    if one cgroup claims much more protection than it uses memory,
 *    the unused remainder is available to its siblings.
 *
 * 4. Conversely, when the declared protection is undercommitted at a
 *    given level, the distribution of the larger parental protection
 *    budget is NOT proportional. A cgroup's protection from a sibling
 *    is capped to its own memory.min/low setting.
 *
 * 5. However, to allow protecting recursive subtrees from each other
 *    without having to declare each individual cgroup's fixed share
 *    of the ancestor's claim to protection, any unutilized -
 *    "floating" - protection from up the tree is distributed in
 *    proportion to each cgroup's *usage*. This makes the protection
 *    neutral wrt sibling cgroups and lets them compete freely over
 *    the shared parental protection budget, but it protects the
 *    subtree as a whole from neighboring subtrees.
 *
 * Note that 4. and 5. are not in conflict: 4. is about protecting
 * against immediate siblings whereas 5. is about protecting against
 * neighboring subtrees.
 */
static unsigned long effective_protection(unsigned long usage,
					  unsigned long parent_usage,
					  unsigned long setting,
					  unsigned long parent_effective,
					  unsigned long siblings_protected)
{
	unsigned long protected;
	unsigned long ep;

	protected = min(usage, setting);
	/*
	 * If all cgroups at this level combined claim and use more
	 * protection then what the parent affords them, distribute
	 * shares in proportion to utilization.
	 *
	 * We are using actual utilization rather than the statically
	 * claimed protection in order to be work-conserving: claimed
	 * but unused protection is available to siblings that would
	 * otherwise get a smaller chunk than what they claimed.
	 */
	if (siblings_protected > parent_effective)
		return protected * parent_effective / siblings_protected;

	/*
	 * Ok, utilized protection of all children is within what the
	 * parent affords them, so we know whatever this child claims
	 * and utilizes is effectively protected.
	 *
	 * If there is unprotected usage beyond this value, reclaim
	 * will apply pressure in proportion to that amount.
	 *
	 * If there is unutilized protection, the cgroup will be fully
	 * shielded from reclaim, but we do return a smaller value for
	 * protection than what the group could enjoy in theory. This
	 * is okay. With the overcommit distribution above, effective
	 * protection is always dependent on how memory is actually
	 * consumed among the siblings anyway.
	 */
	ep = protected;

	/*
	 * If the children aren't claiming (all of) the protection
	 * afforded to them by the parent, distribute the remainder in
	 * proportion to the (unprotected) memory of each cgroup. That
	 * way, cgroups that aren't explicitly prioritized wrt each
	 * other compete freely over the allowance, but they are
	 * collectively protected from neighboring trees.
	 *
	 * We're using unprotected memory for the weight so that if
	 * some cgroups DO claim explicit protection, we don't protect
	 * the same bytes twice.
	 *
	 * Check both usage and parent_usage against the respective
	 * protected values. One should imply the other, but they
	 * aren't read atomically - make sure the division is sane.
	 */
	if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
		return ep;
	if (parent_effective > siblings_protected &&
	    parent_usage > siblings_protected &&
	    usage > protected) {
		unsigned long unclaimed;

		unclaimed = parent_effective - siblings_protected;
		unclaimed *= usage - protected;
		unclaimed /= parent_usage - siblings_protected;

		ep += unclaimed;
	}

	return ep;
}
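
/*
 * Illustrative example of the proportional overcommit case above: if a
 * parent's effective protection is 100 pages but its children claim and
 * use 200 protected pages in total, a child using 50 of its own claimed
 * pages ends up with 50 * 100 / 200 = 25 pages of effective protection.
 */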

/**
 * mem_cgroup_calculate_protection - calculate a memcg's effective protection
 * @root: the top ancestor of the sub-tree being checked
 * @memcg: the memory cgroup to check
 *
 * WARNING: This function is not stateless! It can only be used as part
 *          of a top-down tree iteration, not for isolated queries.
 */
void mem_cgroup_calculate_protection(struct mem_cgroup *root,
				     struct mem_cgroup *memcg)
6661
{
6662
	unsigned long usage, parent_usage;
6663 6664
	struct mem_cgroup *parent;

6665
	if (mem_cgroup_disabled())
6666
		return;
6667

6668 6669
	if (!root)
		root = root_mem_cgroup;
6670 6671 6672 6673 6674 6675 6676 6677

	/*
	 * Effective values of the reclaim targets are ignored so they
	 * can be stale. Have a look at mem_cgroup_protection for more
	 * details.
	 * TODO: calculation should be more robust so that we do not need
	 * that special casing.
	 */
6678
	if (memcg == root)
6679
		return;
6680

6681
	usage = page_counter_read(&memcg->memory);
R
Roman Gushchin 已提交
6682
	if (!usage)
6683
		return;
R
Roman Gushchin 已提交
6684 6685

	parent = parent_mem_cgroup(memcg);
6686 6687
	/* No parent means a non-hierarchical mode on v1 memcg */
	if (!parent)
6688
		return;
6689

6690
	if (parent == root) {
6691
		memcg->memory.emin = READ_ONCE(memcg->memory.min);
6692
		memcg->memory.elow = READ_ONCE(memcg->memory.low);
6693
		return;
R
Roman Gushchin 已提交
6694 6695
	}

6696 6697
	parent_usage = page_counter_read(&parent->memory);

6698
	WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage,
6699 6700
			READ_ONCE(memcg->memory.min),
			READ_ONCE(parent->memory.emin),
6701
			atomic_long_read(&parent->memory.children_min_usage)));
6702

6703
	WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
6704 6705
			READ_ONCE(memcg->memory.low),
			READ_ONCE(parent->memory.elow),
6706
			atomic_long_read(&parent->memory.children_low_usage)));
6707 6708
}

/**
 * mem_cgroup_charge - charge a newly allocated page to a cgroup
 * @page: page to charge
 * @mm: mm context of the victim
 * @gfp_mask: reclaim mode
 *
 * Try to charge @page to the memcg that @mm belongs to, reclaiming
 * pages according to @gfp_mask if necessary.
 *
 * Returns 0 on success. Otherwise, an error code is returned.
 */
int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
{
	unsigned int nr_pages = thp_nr_pages(page);
	struct mem_cgroup *memcg = NULL;
	int ret = 0;

	if (mem_cgroup_disabled())
		goto out;

	if (PageSwapCache(page)) {
		swp_entry_t ent = { .val = page_private(page), };
		unsigned short id;

		/*
		 * Every swap fault against a single page tries to charge the
		 * page, bail as early as possible.  shmem_unuse() encounters
		 * already charged pages, too.  page and memcg binding is
		 * protected by the page lock, which serializes swap cache
		 * removal, which in turn serializes uncharging.
		 */
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		if (page_memcg(compound_head(page)))
			goto out;

		id = lookup_swap_cgroup_id(ent);
		rcu_read_lock();
		memcg = mem_cgroup_from_id(id);
		if (memcg && !css_tryget_online(&memcg->css))
			memcg = NULL;
		rcu_read_unlock();
	}

	if (!memcg)
		memcg = get_mem_cgroup_from_mm(mm);

	ret = try_charge(memcg, gfp_mask, nr_pages);
	if (ret)
		goto out_put;

	css_get(&memcg->css);
	commit_charge(page, memcg);

	local_irq_disable();
	mem_cgroup_charge_statistics(memcg, page, nr_pages);
	memcg_check_events(memcg, page);
	local_irq_enable();

	if (PageSwapCache(page)) {
		swp_entry_t entry = { .val = page_private(page) };
		/*
		 * The swap entry might not get freed for a long time,
		 * let's not wait for it.  The page already received a
		 * memory+swap charge, drop the swap entry duplicate.
		 */
		mem_cgroup_uncharge_swap(entry, nr_pages);
	}

out_put:
	css_put(&memcg->css);
out:
	return ret;
}

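/*
 * State for batching uncharges: pages that belong to the same memcg are
 * accumulated and uncharged in one go by uncharge_batch(), which keeps
 * page_counter and event updates off the per-page fast path.
 */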
struct uncharge_gather {
	struct mem_cgroup *memcg;
	unsigned long nr_pages;
	unsigned long pgpgout;
	unsigned long nr_kmem;
	struct page *dummy_page;
};

static inline void uncharge_gather_clear(struct uncharge_gather *ug)
{
	memset(ug, 0, sizeof(*ug));
}

static void uncharge_batch(const struct uncharge_gather *ug)
{
	unsigned long flags;

	if (!mem_cgroup_is_root(ug->memcg)) {
		page_counter_uncharge(&ug->memcg->memory, ug->nr_pages);
		if (do_memsw_account())
			page_counter_uncharge(&ug->memcg->memsw, ug->nr_pages);
		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem)
			page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem);
		memcg_oom_recover(ug->memcg);
	}

	local_irq_save(flags);
	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages);
	memcg_check_events(ug->memcg, ug->dummy_page);
	local_irq_restore(flags);

	/* drop reference from uncharge_page */
	css_put(&ug->memcg->css);
}

static void uncharge_page(struct page *page, struct uncharge_gather *ug)
{
	unsigned long nr_pages;

	VM_BUG_ON_PAGE(PageLRU(page), page);

	if (!page_memcg(page))
		return;

	/*
	 * Nobody should be changing or seriously looking at
	 * page_memcg(page) at this point, we have fully
	 * exclusive access to the page.
	 */

	if (ug->memcg != page_memcg(page)) {
		if (ug->memcg) {
			uncharge_batch(ug);
			uncharge_gather_clear(ug);
		}
		ug->memcg = page_memcg(page);

		/* pairs with css_put in uncharge_batch */
		css_get(&ug->memcg->css);
	}

	nr_pages = compound_nr(page);
	ug->nr_pages += nr_pages;

	if (PageMemcgKmem(page))
		ug->nr_kmem += nr_pages;
	else
		ug->pgpgout++;

	ug->dummy_page = page;
	page->memcg_data = 0;
	css_put(&ug->memcg->css);
}

static void uncharge_list(struct list_head *page_list)
{
	struct uncharge_gather ug;
	struct list_head *next;

	uncharge_gather_clear(&ug);

	/*
	 * Note that the list can be a single page->lru; hence the
	 * do-while loop instead of a simple list_for_each_entry().
	 */
	next = page_list->next;
	do {
		struct page *page;

		page = list_entry(next, struct page, lru);
		next = page->lru.next;

		uncharge_page(page, &ug);
	} while (next != page_list);

	if (ug.memcg)
		uncharge_batch(&ug);
}

/**
 * mem_cgroup_uncharge - uncharge a page
 * @page: page to uncharge
 *
 * Uncharge a page previously charged with mem_cgroup_charge().
 */
void mem_cgroup_uncharge(struct page *page)
{
	struct uncharge_gather ug;

	if (mem_cgroup_disabled())
		return;

	/* Don't touch page->lru of any random page, pre-check: */
	if (!page_memcg(page))
		return;

	uncharge_gather_clear(&ug);
	uncharge_page(page, &ug);
	uncharge_batch(&ug);
}

/**
 * mem_cgroup_uncharge_list - uncharge a list of pages
 * @page_list: list of pages to uncharge
 *
 * Uncharge a list of pages previously charged with
 * mem_cgroup_charge().
 */
void mem_cgroup_uncharge_list(struct list_head *page_list)
{
	if (mem_cgroup_disabled())
		return;

	if (!list_empty(page_list))
		uncharge_list(page_list);
}

/**
 * mem_cgroup_migrate - charge a page's replacement
 * @oldpage: currently circulating page
 * @newpage: replacement page
 *
 * Charge @newpage as a replacement page for @oldpage. @oldpage will
 * be uncharged upon free.
 *
 * Both pages must be locked, @newpage->mapping must be set up.
 */
void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
{
	struct mem_cgroup *memcg;
	unsigned int nr_pages;
	unsigned long flags;

	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
	VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
	VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
		       newpage);

	if (mem_cgroup_disabled())
		return;

	/* Page cache replacement: new page already charged? */
	if (page_memcg(newpage))
		return;

	memcg = page_memcg(oldpage);
	VM_WARN_ON_ONCE_PAGE(!memcg, oldpage);
	if (!memcg)
		return;

	/* Force-charge the new page. The old one will be freed soon */
	nr_pages = thp_nr_pages(newpage);

	page_counter_charge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_charge(&memcg->memsw, nr_pages);

	css_get(&memcg->css);
	commit_charge(newpage, memcg);

	local_irq_save(flags);
	mem_cgroup_charge_statistics(memcg, newpage, nr_pages);
	memcg_check_events(memcg, newpage);
	local_irq_restore(flags);
}

DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
EXPORT_SYMBOL(memcg_sockets_enabled_key);

void mem_cgroup_sk_alloc(struct sock *sk)
{
	struct mem_cgroup *memcg;

	if (!mem_cgroup_sockets_enabled)
		return;

	/* Do not associate the sock with unrelated interrupted task's memcg. */
	if (in_interrupt())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(current);
	if (memcg == root_mem_cgroup)
		goto out;
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
		goto out;
	if (css_tryget(&memcg->css))
		sk->sk_memcg = memcg;
out:
	rcu_read_unlock();
}

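/*
 * The socket memory helpers below are driven by the network stack (a
 * sketch of the expected usage, not a guarantee): sk->sk_memcg is set up
 * by mem_cgroup_sk_alloc() above, dropped in mem_cgroup_sk_free(), and
 * the per-socket buffer charges are applied through
 * mem_cgroup_charge_skmem()/mem_cgroup_uncharge_skmem().
 */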
void mem_cgroup_sk_free(struct sock *sk)
{
	if (sk->sk_memcg)
		css_put(&sk->sk_memcg->css);
}

/**
 * mem_cgroup_charge_skmem - charge socket memory
 * @memcg: memcg to charge
 * @nr_pages: number of pages to charge
 *
 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
 * @memcg's configured limit, %false if the charge had to be forced.
 */
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	gfp_t gfp_mask = GFP_KERNEL;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
		struct page_counter *fail;

		if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
			memcg->tcpmem_pressure = 0;
			return true;
		}
		page_counter_charge(&memcg->tcpmem, nr_pages);
		memcg->tcpmem_pressure = 1;
		return false;
	}

	/* Don't block in the packet receive path */
	if (in_softirq())
		gfp_mask = GFP_NOWAIT;

	mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);

	if (try_charge(memcg, gfp_mask, nr_pages) == 0)
		return true;

	try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
	return false;
}

/**
 * mem_cgroup_uncharge_skmem - uncharge socket memory
 * @memcg: memcg to uncharge
 * @nr_pages: number of pages to uncharge
 */
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
		page_counter_uncharge(&memcg->tcpmem, nr_pages);
		return;
	}

	mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);

	refill_stock(memcg, nr_pages);
}

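/*
 * Boot-time tuning: for example, booting with
 * "cgroup.memory=nosocket,nokmem" sets both flags below and disables
 * socket and kernel memory accounting.
 */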
static int __init cgroup_memory(char *s)
{
	char *token;

	while ((token = strsep(&s, ",")) != NULL) {
		if (!*token)
			continue;
		if (!strcmp(token, "nosocket"))
			cgroup_memory_nosocket = true;
		if (!strcmp(token, "nokmem"))
			cgroup_memory_nokmem = true;
	}
	return 0;
}
__setup("cgroup.memory=", cgroup_memory);

/*
 * subsys_initcall() for memory controller.
 *
 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
 * basically everything that doesn't depend on a specific mem_cgroup structure
 * should be initialized from here.
 */
static int __init mem_cgroup_init(void)
{
	int cpu, node;

	/*
	 * Currently s32 type (can refer to struct batched_lruvec_stat) is
	 * used for per-memcg-per-cpu caching of per-node statistics. In order
	 * to work fine, we should make sure that the overfill threshold can't
	 * exceed S32_MAX / PAGE_SIZE.
	 */
	BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);

	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
				  memcg_hotplug_cpu_dead);

	for_each_possible_cpu(cpu)
		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
			  drain_local_stock);

	for_each_node(node) {
		struct mem_cgroup_tree_per_node *rtpn;

		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
				    node_online(node) ? node : NUMA_NO_NODE);

		rtpn->rb_root = RB_ROOT;
		rtpn->rb_rightmost = NULL;
		spin_lock_init(&rtpn->lock);
		soft_limit_tree.rb_tree_per_node[node] = rtpn;
	}

	return 0;
}
subsys_initcall(mem_cgroup_init);

#ifdef CONFIG_MEMCG_SWAP
static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
{
	while (!refcount_inc_not_zero(&memcg->id.ref)) {
		/*
		 * The root cgroup cannot be destroyed, so its refcount must
		 * always be >= 1.
		 */
		if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
			VM_BUG_ON(1);
			break;
		}
		memcg = parent_mem_cgroup(memcg);
		if (!memcg)
			memcg = root_mem_cgroup;
	}
	return memcg;
}

/**
 * mem_cgroup_swapout - transfer a memsw charge to swap
 * @page: page whose memsw charge to transfer
 * @entry: swap entry to move the charge to
 *
 * Transfer the memsw charge of @page to @entry.
 */
void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
	struct mem_cgroup *memcg, *swap_memcg;
	unsigned int nr_entries;
	unsigned short oldid;

	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(page_count(page), page);

	if (mem_cgroup_disabled())
		return;

	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return;

	memcg = page_memcg(page);

	VM_WARN_ON_ONCE_PAGE(!memcg, page);
	if (!memcg)
		return;

	/*
	 * In case the memcg owning these pages has been offlined and doesn't
	 * have an ID allocated to it anymore, charge the closest online
	 * ancestor for the swap instead and transfer the memory+swap charge.
	 */
	swap_memcg = mem_cgroup_id_get_online(memcg);
	nr_entries = thp_nr_pages(page);
	/* Get references for the tail pages, too */
	if (nr_entries > 1)
		mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
				   nr_entries);
	VM_BUG_ON_PAGE(oldid, page);
	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);

	page->memcg_data = 0;

	if (!mem_cgroup_is_root(memcg))
		page_counter_uncharge(&memcg->memory, nr_entries);

	if (!cgroup_memory_noswap && memcg != swap_memcg) {
		if (!mem_cgroup_is_root(swap_memcg))
			page_counter_charge(&swap_memcg->memsw, nr_entries);
		page_counter_uncharge(&memcg->memsw, nr_entries);
	}

	/*
	 * Interrupts should be disabled here because the caller holds the
	 * i_pages lock which is taken with interrupts-off. It is
	 * important here to have the interrupts disabled because it is the
	 * only synchronisation we have for updating the per-CPU variables.
	 */
	VM_BUG_ON(!irqs_disabled());
	mem_cgroup_charge_statistics(memcg, page, -nr_entries);
	memcg_check_events(memcg, page);

	css_put(&memcg->css);
}

7202 7203
/**
 * mem_cgroup_try_charge_swap - try charging swap space for a page
 * @page: page being added to swap
 * @entry: swap entry to charge
 *
 * Try to charge @page's memcg for the swap space at @entry.
 *
 * Returns 0 on success, -ENOMEM on failure.
 */
int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
{
	unsigned int nr_pages = thp_nr_pages(page);
	struct page_counter *counter;
	struct mem_cgroup *memcg;
	unsigned short oldid;

	if (mem_cgroup_disabled())
		return 0;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return 0;

	memcg = page_memcg(page);

	VM_WARN_ON_ONCE_PAGE(!memcg, page);
	if (!memcg)
		return 0;

	if (!entry.val) {
		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
		return 0;
	}

	memcg = mem_cgroup_id_get_online(memcg);

	if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg) &&
	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
		memcg_memory_event(memcg, MEMCG_SWAP_MAX);
		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
		mem_cgroup_id_put(memcg);
		return -ENOMEM;
	}

	/* Get references for the tail pages, too */
	if (nr_pages > 1)
		mem_cgroup_id_get_many(memcg, nr_pages - 1);
	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
	VM_BUG_ON_PAGE(oldid, page);
	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);

	return 0;
}

/**
 * mem_cgroup_uncharge_swap - uncharge swap space
 * @entry: swap entry to uncharge
 * @nr_pages: the amount of swap space to uncharge
 */
void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
{
	struct mem_cgroup *memcg;
	unsigned short id;

	id = swap_cgroup_record(entry, 0, nr_pages);
	rcu_read_lock();
	memcg = mem_cgroup_from_id(id);
	if (memcg) {
		if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg)) {
			if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
				page_counter_uncharge(&memcg->swap, nr_pages);
			else
				page_counter_uncharge(&memcg->memsw, nr_pages);
		}
		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
		mem_cgroup_id_put_many(memcg, nr_pages);
	}
	}
	rcu_read_unlock();
}

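/*
 * Return how many swap pages @memcg may still use: the global free swap
 * count, clamped by the remaining swap budget (swap.max - usage) of @memcg
 * and each of its ancestors.
 */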
long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	long nr_swap_pages = get_nr_swap_pages();

	if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return nr_swap_pages;
	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
		nr_swap_pages = min_t(long, nr_swap_pages,
				      READ_ONCE(memcg->swap.max) -
				      page_counter_read(&memcg->swap));
	return nr_swap_pages;
}

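/*
 * Swap for @page's memcg is considered "full" once swap usage reaches half
 * of swap.high or swap.max anywhere up the hierarchy (usage * 2 >= limit),
 * e.g. a cgroup with swap.max = 200M reports full at 100M of swap used.
 */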
bool mem_cgroup_swap_full(struct page *page)
{
	struct mem_cgroup *memcg;

	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (vm_swap_full())
		return true;
	if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return false;

	memcg = page_memcg(page);
	if (!memcg)
		return false;

	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
		unsigned long usage = page_counter_read(&memcg->swap);

		if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
		    usage * 2 >= READ_ONCE(memcg->swap.max))
			return true;
	}

	return false;
}

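/*
 * "swapaccount=" kernel boot parameter: "swapaccount=0" disables memcg swap
 * accounting, "swapaccount=1" enables it.
 */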
static int __init setup_swap_account(char *s)
{
	if (!strcmp(s, "1"))
		cgroup_memory_noswap = false;
	else if (!strcmp(s, "0"))
		cgroup_memory_noswap = true;
	return 1;
}
__setup("swapaccount=", setup_swap_account);

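/* memory.swap.current: current swap usage of the cgroup, in bytes */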
static u64 swap_current_read(struct cgroup_subsys_state *css,
			     struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
}

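/*
 * memory.swap.high: swap usage high watermark; accepts a byte value (with
 * optional K/M/G suffix) or "max", the same syntax as memory.swap.max.
 */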
static int swap_high_show(struct seq_file *m, void *v)
{
	return seq_puts_memcg_tunable(m,
		READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
}

static ssize_t swap_high_write(struct kernfs_open_file *of,
			       char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long high;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &high);
	if (err)
		return err;

	page_counter_set_high(&memcg->swap, high);

	return nbytes;
}

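/*
 * memory.swap.max: hard swap limit, e.g. "echo 1G > memory.swap.max" caps
 * the cgroup at 1GiB of swap; writing "max" removes the limit.
 */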
static int swap_max_show(struct seq_file *m, void *v)
{
	return seq_puts_memcg_tunable(m,
		READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
}

static ssize_t swap_max_write(struct kernfs_open_file *of,
			      char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long max;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &max);
	if (err)
		return err;

	xchg(&memcg->swap.max, max);

	return nbytes;
}

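/*
 * memory.swap.events: counts of swap.high excesses, swap.max hits, and
 * failed swap charges.
 */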
static int swap_events_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);

	seq_printf(m, "high %lu\n",
		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
	seq_printf(m, "max %lu\n",
		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
	seq_printf(m, "fail %lu\n",
		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));

	return 0;
}

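/* Swap control files for the default (cgroup v2) hierarchy */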
static struct cftype swap_files[] = {
	{
		.name = "swap.current",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = swap_current_read,
	},
	{
		.name = "swap.high",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = swap_high_show,
		.write = swap_high_write,
	},
	{
		.name = "swap.max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = swap_max_show,
		.write = swap_max_write,
	},
	{
		.name = "swap.events",
		.flags = CFTYPE_NOT_ON_ROOT,
		.file_offset = offsetof(struct mem_cgroup, swap_events_file),
		.seq_show = swap_events_show,
	},
	{ }	/* terminate */
};

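/* Legacy (cgroup v1) memory+swap "memsw" control files */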
static struct cftype memsw_files[] = {
	{
		.name = "memsw.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.failcnt",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{ },	/* terminate */
};

/*
 * If mem_cgroup_swap_init() is implemented as a subsys_initcall()
 * instead of a core_initcall(), this could mean cgroup_memory_noswap still
 * remains set to false even when memcg is disabled via "cgroup_disable=memory"
 * boot parameter. This may result in premature OOPS inside
 * mem_cgroup_get_nr_swap_pages() function in corner cases.
 */
static int __init mem_cgroup_swap_init(void)
{
	/* No memory control -> no swap control */
	if (mem_cgroup_disabled())
		cgroup_memory_noswap = true;

	if (cgroup_memory_noswap)
		return 0;

	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
	WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));

	return 0;
}
core_initcall(mem_cgroup_swap_init);

#endif /* CONFIG_MEMCG_SWAP */