// SPDX-License-Identifier: GPL-2.0-or-later
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 */

#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/vm_event_item.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include <linux/tracehook.h>
#include <linux/psi.h>
#include <linux/seq_buf.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"

#include <linux/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;

/* Active memory cgroup to use from an interrupt context */
DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);

/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket;

/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem;

/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
bool cgroup_memory_noswap __read_mostly;
#else
#define cgroup_memory_noswap		1
#endif

#ifdef CONFIG_CGROUP_WRITEBACK
static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
#endif

/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_noswap;
}

#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_node {
	struct rb_root rb_root;
	struct rb_node *rb_rightmost;
	spinlock_t lock;
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these is stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * register_event() callback will be used to add new userspace
	 * waiter for changes related to this event.  Use eventfd_signal()
	 * on eventfd to send notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace closes
	 * the eventfd or on cgroup removal.  This callback must be set
	 * if you want to provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below needed to unregister event when
	 * userspace closes eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_entry_t wait;
	struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/* Stuff for move charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON	0x1U
#define MOVE_FILE	0x2U
#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mm_struct  *mm;
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
	_TCP,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for OOM notifier */
#define OOM_CONTROL		(0)

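/*
 * Illustrative sketch, not part of the original source: a legacy control
 * file whose cft->private was built as MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT)
 * (RES_LIMIT is one of the resource attributes defined further down in this
 * file) can later be decoded with MEMFILE_TYPE(cft->private) == _MEMSWAP
 * and MEMFILE_ATTR(cft->private) == RES_LIMIT.
 */
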
/*
 * Iteration constructs for visiting all cgroups (under a tree).  If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))

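/*
 * Illustrative sketch, not part of the original source: a partial walk of
 * the tree below @root must drop the reference held on the last visited
 * memcg via mem_cgroup_iter_break():
 *
 *	struct mem_cgroup *iter;
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (stop_condition(iter)) {	-- hypothetical predicate
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */
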
static inline bool should_force_charge(void)
{
	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
		(current->flags & PF_EXITING);
}

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

#ifdef CONFIG_MEMCG_KMEM
extern spinlock_t css_set_lock;

static void obj_cgroup_release(struct percpu_ref *ref)
{
	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
	struct mem_cgroup *memcg;
	unsigned int nr_bytes;
	unsigned int nr_pages;
	unsigned long flags;

	/*
	 * At this point all allocated objects are freed, and
	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
	 *
	 * The following sequence can lead to it:
	 * 1) CPU0: objcg == stock->cached_objcg
	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
	 *          PAGE_SIZE bytes are charged
	 * 3) CPU1: a process from another memcg is allocating something,
	 *          the stock is flushed,
	 *          objcg->nr_charged_bytes = PAGE_SIZE - 92
	 * 4) CPU0: we release this object,
	 *          92 bytes are added to stock->nr_bytes
	 * 5) CPU0: stock is flushed,
	 *          92 bytes are added to objcg->nr_charged_bytes
	 *
	 * As a result, nr_charged_bytes == PAGE_SIZE.
	 * This page will be uncharged in obj_cgroup_release().
	 */
	nr_bytes = atomic_read(&objcg->nr_charged_bytes);
	WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
	nr_pages = nr_bytes >> PAGE_SHIFT;

	spin_lock_irqsave(&css_set_lock, flags);
	memcg = obj_cgroup_memcg(objcg);
	if (nr_pages)
		__memcg_kmem_uncharge(memcg, nr_pages);
	list_del(&objcg->list);
	mem_cgroup_put(memcg);
	spin_unlock_irqrestore(&css_set_lock, flags);

	percpu_ref_exit(ref);
	kfree_rcu(objcg, rcu);
}

static struct obj_cgroup *obj_cgroup_alloc(void)
{
	struct obj_cgroup *objcg;
	int ret;

	objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
	if (!objcg)
		return NULL;

	ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
			      GFP_KERNEL);
	if (ret) {
		kfree(objcg);
		return NULL;
	}
	INIT_LIST_HEAD(&objcg->list);
	return objcg;
}

static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
				  struct mem_cgroup *parent)
{
	struct obj_cgroup *objcg, *iter;

	objcg = rcu_replace_pointer(memcg->objcg, NULL, true);

	spin_lock_irq(&css_set_lock);

	/* Move active objcg to the parent's list */
	xchg(&objcg->memcg, parent);
	css_get(&parent->css);
	list_add(&objcg->list, &parent->objcg_list);

	/* Move already reparented objcgs to the parent's list */
	list_for_each_entry(iter, &memcg->objcg_list, list) {
		css_get(&parent->css);
		xchg(&iter->memcg, parent);
		css_put(&memcg->css);
	}
	list_splice(&memcg->objcg_list, &parent->objcg_list);

	spin_unlock_irq(&css_set_lock);

	percpu_ref_kill(&objcg->refcnt);
}

/*
 * This will be used as a shrinker list's index.
 * The main reason for not using the cgroup id for this:
 *  this works better in sparse environments, where we have a lot of memcgs,
 *  but only a few are kmem-limited. For example, if we have 200 memcgs,
 *  and none but the 200th is kmem-limited, we'd have to have a
 *  200-entry array for that.
 *
 * The current size of the caches array is stored in memcg_nr_cache_ids. It
 * will double each time we have to increase it.
 */
static DEFINE_IDA(memcg_cache_ida);
int memcg_nr_cache_ids;

/* Protects memcg_nr_cache_ids */
static DECLARE_RWSEM(memcg_cache_ids_sem);

void memcg_get_cache_ids(void)
{
	down_read(&memcg_cache_ids_sem);
}

void memcg_put_cache_ids(void)
{
	up_read(&memcg_cache_ids_sem);
}

/*
 * MIN_SIZE is different from 1, because we would like to avoid going through
 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 * cgroups is a reasonable guess. In the future, it could be a parameter or
 * tunable, but that is not strictly necessary.
 *
 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
 * this constant directly from cgroup, but it is understandable that this is
 * better kept as an internal representation in cgroup.c. In any case, the
 * cgrp_id space is not getting any smaller, and we don't have to necessarily
 * increase ours as well if it increases.
 */
#define MEMCG_CACHES_MIN_SIZE 4
#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
 * conditional to this static branch, we'll have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
EXPORT_SYMBOL(memcg_kmem_enabled_key);
#endif

static int memcg_shrinker_map_size;
static DEFINE_MUTEX(memcg_shrinker_map_mutex);

static void memcg_free_shrinker_map_rcu(struct rcu_head *head)
{
	kvfree(container_of(head, struct memcg_shrinker_map, rcu));
}

static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg,
					 int size, int old_size)
{
	struct memcg_shrinker_map *new, *old;
	int nid;

	lockdep_assert_held(&memcg_shrinker_map_mutex);

	for_each_node(nid) {
		old = rcu_dereference_protected(
			mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true);
		/* Not yet online memcg */
		if (!old)
			return 0;

		new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid);
		if (!new)
			return -ENOMEM;

		/* Set all old bits, clear all new bits */
		memset(new->map, (int)0xff, old_size);
		memset((void *)new->map + old_size, 0, size - old_size);

		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new);
		call_rcu(&old->rcu, memcg_free_shrinker_map_rcu);
	}

	return 0;
}

static void memcg_free_shrinker_maps(struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_node *pn;
	struct memcg_shrinker_map *map;
	int nid;

	if (mem_cgroup_is_root(memcg))
		return;

	for_each_node(nid) {
		pn = mem_cgroup_nodeinfo(memcg, nid);
		map = rcu_dereference_protected(pn->shrinker_map, true);
		if (map)
			kvfree(map);
		rcu_assign_pointer(pn->shrinker_map, NULL);
	}
}

static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
{
	struct memcg_shrinker_map *map;
	int nid, size, ret = 0;

	if (mem_cgroup_is_root(memcg))
		return 0;

	mutex_lock(&memcg_shrinker_map_mutex);
	size = memcg_shrinker_map_size;
	for_each_node(nid) {
		map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid);
		if (!map) {
			memcg_free_shrinker_maps(memcg);
			ret = -ENOMEM;
			break;
		}
		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map);
	}
	mutex_unlock(&memcg_shrinker_map_mutex);

	return ret;
}

int memcg_expand_shrinker_maps(int new_id)
{
	int size, old_size, ret = 0;
	struct mem_cgroup *memcg;

	size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
	old_size = memcg_shrinker_map_size;
	if (size <= old_size)
		return 0;

	mutex_lock(&memcg_shrinker_map_mutex);
	if (!root_mem_cgroup)
		goto unlock;

	for_each_mem_cgroup(memcg) {
		if (mem_cgroup_is_root(memcg))
			continue;
		ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
		if (ret) {
			mem_cgroup_iter_break(NULL, memcg);
			goto unlock;
		}
	}
unlock:
	if (!ret)
		memcg_shrinker_map_size = size;
	mutex_unlock(&memcg_shrinker_map_mutex);
	return ret;
}

void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
{
	if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
		struct memcg_shrinker_map *map;

		rcu_read_lock();
		map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
		/* Pairs with smp mb in shrink_slab() */
		smp_mb__before_atomic();
		set_bit(shrinker_id, map->map);
		rcu_read_unlock();
	}
}

/**
 * mem_cgroup_css_from_page - css of the memcg associated with a page
 * @page: page of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @page is returned.  The returned css remains associated with @page
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
{
	struct mem_cgroup *memcg;

	memcg = page->mem_cgroup;

	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg = root_mem_cgroup;

	return &memcg->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it only should be used by callers that
 * do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
	memcg = page->mem_cgroup;

	/*
	 * The lowest bit set means that memcg isn't a valid
	 * memcg pointer, but an obj_cgroups pointer.
	 * In this case the page is shared and doesn't belong
	 * to any specific memory cgroup.
	 */
	if ((unsigned long) memcg & 0x1UL)
		memcg = NULL;

	while (memcg && !(memcg->css.flags & CSS_ONLINE))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}

static struct mem_cgroup_per_node *
mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);

	return memcg->nodeinfo[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_node(int nid)
{
	return soft_limit_tree.rb_tree_per_node[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);

	return soft_limit_tree.rb_tree_per_node[nid];
}

static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz,
					 unsigned long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_node *mz_node;
	bool rightmost = true;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess) {
			p = &(*p)->rb_left;
			rightmost = false;
		}

		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}

	if (rightmost)
		mctz->rb_rightmost = &mz->tree_node;

	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz)
{
	if (!mz->on_tree)
		return;

	if (&mz->tree_node == mctz->rb_rightmost)
		mctz->rb_rightmost = rb_prev(&mz->tree_node);

	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
				       struct mem_cgroup_tree_per_node *mctz)
{
	unsigned long flags;

	spin_lock_irqsave(&mctz->lock, flags);
	__mem_cgroup_remove_exceeded(mz, mctz);
	spin_unlock_irqrestore(&mctz->lock, flags);
}

static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
	unsigned long nr_pages = page_counter_read(&memcg->memory);
	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
	unsigned long excess = 0;

	if (nr_pages > soft_limit)
		excess = nr_pages - soft_limit;

	return excess;
}

static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
	unsigned long excess;
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup_tree_per_node *mctz;

	mctz = soft_limit_tree_from_page(page);
	if (!mctz)
		return;
	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_page_nodeinfo(memcg, page);
		excess = soft_limit_excess(memcg);
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
			spin_unlock_irqrestore(&mctz->lock, flags);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	struct mem_cgroup_tree_per_node *mctz;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = mem_cgroup_nodeinfo(memcg, nid);
		mctz = soft_limit_tree_node(nid);
		if (mctz)
			mem_cgroup_remove_exceeded(mz, mctz);
	}
}

static struct mem_cgroup_per_node *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

retry:
	mz = NULL;
	if (!mctz->rb_rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(mctz->rb_rightmost,
		      struct mem_cgroup_per_node, tree_node);
	/*
	 * Remove the node now but someone else can add it back,
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz, mctz);
	if (!soft_limit_excess(mz->memcg) ||
	    !css_tryget(&mz->memcg->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_node *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

	spin_lock_irq(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock_irq(&mctz->lock);
	return mz;
}

/**
 * __mod_memcg_state - update cgroup memory statistics
 * @memcg: the memory cgroup
 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
 * @val: delta to add to the counter, can be negative
 */
void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
{
	long x, threshold = MEMCG_CHARGE_BATCH;

	if (mem_cgroup_disabled())
		return;

	if (memcg_stat_item_in_bytes(idx))
		threshold <<= PAGE_SHIFT;

	x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);
	if (unlikely(abs(x) > threshold)) {
		struct mem_cgroup *mi;

		/*
		 * Batch local counters to keep them in sync with
		 * the hierarchical ones.
		 */
		__this_cpu_add(memcg->vmstats_local->stat[idx], x);
		for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
			atomic_long_add(x, &mi->vmstats[idx]);
		x = 0;
	}
	__this_cpu_write(memcg->vmstats_percpu->stat[idx], x);
}

static struct mem_cgroup_per_node *
parent_nodeinfo(struct mem_cgroup_per_node *pn, int nid)
{
	struct mem_cgroup *parent;

	parent = parent_mem_cgroup(pn->memcg);
	if (!parent)
		return NULL;
	return mem_cgroup_nodeinfo(parent, nid);
}

void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			      int val)
{
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup *memcg;
	long x, threshold = MEMCG_CHARGE_BATCH;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	memcg = pn->memcg;

	/* Update memcg */
	__mod_memcg_state(memcg, idx, val);

	/* Update lruvec */
	__this_cpu_add(pn->lruvec_stat_local->count[idx], val);

	if (vmstat_item_in_bytes(idx))
		threshold <<= PAGE_SHIFT;

	x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
	if (unlikely(abs(x) > threshold)) {
		pg_data_t *pgdat = lruvec_pgdat(lruvec);
		struct mem_cgroup_per_node *pi;

		for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
			atomic_long_add(x, &pi->lruvec_stat[idx]);
		x = 0;
	}
	__this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
}

/**
 * __mod_lruvec_state - update lruvec memory statistics
 * @lruvec: the lruvec
 * @idx: the stat item
 * @val: delta to add to the counter, can be negative
 *
 * The lruvec is the intersection of the NUMA node and a cgroup. This
 * function updates all three counters that are affected by a
 * change of state at this level: per-node, per-cgroup, per-lruvec.
 */
void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			int val)
{
	/* Update node */
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);

	/* Update memcg and lruvec */
	if (!mem_cgroup_disabled())
		__mod_memcg_lruvec_state(lruvec, idx, val);
}

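/*
 * Illustrative sketch, not part of the original source: charging one page
 * of page cache against a lruvec updates the node, memcg and lruvec
 * counters in a single call, e.g.:
 *
 *	__mod_lruvec_state(lruvec, NR_FILE_PAGES, 1);
 */
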
void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val)
{
	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = mem_cgroup_from_obj(p);

	/*
	 * Untracked pages have no memcg, no lruvec. Update only the
	 * node. If we reparent the slab objects to the root memcg,
	 * when we free the slab object, we need to update the per-memcg
	 * vmstats to keep it correct for the root memcg.
	 */
	if (!memcg) {
		__mod_node_page_state(pgdat, idx, val);
	} else {
		lruvec = mem_cgroup_lruvec(memcg, pgdat);
		__mod_lruvec_state(lruvec, idx, val);
	}
	rcu_read_unlock();
}

void mod_memcg_obj_state(void *p, int idx, int val)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_obj(p);
	if (memcg)
		mod_memcg_state(memcg, idx, val);
	rcu_read_unlock();
}

/**
 * __count_memcg_events - account VM events in a cgroup
 * @memcg: the memory cgroup
 * @idx: the event item
 * @count: the number of events that occurred
 */
void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count)
{
	unsigned long x;

	if (mem_cgroup_disabled())
		return;

	x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]);
	if (unlikely(x > MEMCG_CHARGE_BATCH)) {
		struct mem_cgroup *mi;

		/*
		 * Batch local counters to keep them in sync with
		 * the hierarchical ones.
		 */
		__this_cpu_add(memcg->vmstats_local->events[idx], x);
		for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
			atomic_long_add(x, &mi->vmevents[idx]);
		x = 0;
	}
	__this_cpu_write(memcg->vmstats_percpu->events[idx], x);
}

static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
{
	return atomic_long_read(&memcg->vmevents[event]);
}

static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
{
	long x = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		x += per_cpu(memcg->vmstats_local->events[event], cpu);
	return x;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 struct page *page,
					 int nr_pages)
{
	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__count_memcg_events(memcg, PGPGIN, 1);
	else {
		__count_memcg_events(memcg, PGPGOUT, 1);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
}

static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
	next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)(next - val) < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->vmstats_percpu->targets[target], next);
		return true;
	}
	return false;
}

/*
 * Check events in order.
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
	}
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);

/**
 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
 * @mm: mm from which memcg should be extracted. It can be NULL.
 *
 * Obtain a reference on mm->memcg and returns it if successful. Otherwise
 * root_mem_cgroup is returned. However if mem_cgroup is disabled, NULL is
 * returned.
 */
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return NULL;

	rcu_read_lock();
	do {
		/*
		 * Page cache insertions can happen without an
		 * actual mm context, e.g. during disk probing
		 * on boot, loopback IO, acct() writes etc.
		 */
		if (unlikely(!mm))
			memcg = root_mem_cgroup;
		else {
			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
			if (unlikely(!memcg))
				memcg = root_mem_cgroup;
		}
	} while (!css_tryget(&memcg->css));
	rcu_read_unlock();
	return memcg;
}
EXPORT_SYMBOL(get_mem_cgroup_from_mm);

/**
 * get_mem_cgroup_from_page: Obtain a reference on given page's memcg.
 * @page: page from which memcg should be extracted.
 *
 * Obtain a reference on page->memcg and returns it if successful. Otherwise
 * root_mem_cgroup is returned.
 */
struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
{
	struct mem_cgroup *memcg = page->mem_cgroup;

	if (mem_cgroup_disabled())
		return NULL;

	rcu_read_lock();
	/* Page should not get uncharged and freed memcg under us. */
	if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css)))
		memcg = root_mem_cgroup;
	rcu_read_unlock();
	return memcg;
}
EXPORT_SYMBOL(get_mem_cgroup_from_page);

static __always_inline struct mem_cgroup *active_memcg(void)
{
	if (in_interrupt())
		return this_cpu_read(int_active_memcg);
	else
		return current->active_memcg;
}

static __always_inline struct mem_cgroup *get_active_memcg(void)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = active_memcg();
	if (memcg) {
		/* current->active_memcg must hold a ref. */
		if (WARN_ON_ONCE(!css_tryget(&memcg->css)))
			memcg = root_mem_cgroup;
		else
			memcg = current->active_memcg;
	}
	rcu_read_unlock();

	return memcg;
}

static __always_inline bool memcg_kmem_bypass(void)
{
	/* Allow remote memcg charging from any context. */
	if (unlikely(active_memcg()))
		return false;

	/* Memcg to charge can't be determined. */
	if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
		return true;

	return false;
}

/**
 * If active memcg is set, do not fall back to current->mm->memcg.
 */
static __always_inline struct mem_cgroup *get_mem_cgroup_from_current(void)
{
	if (memcg_kmem_bypass())
		return NULL;

	if (unlikely(active_memcg()))
		return get_active_memcg();

	return get_mem_cgroup_from_mm(current->mm);
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a node in @reclaim to divide up the memcgs
 * in the hierarchy among all concurrent reclaimers operating on the
 * same node.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup_reclaim_iter *iter;
	struct cgroup_subsys_state *css = NULL;
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *pos = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	if (prev && !reclaim)
		pos = prev;

	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
			goto out;
		return root;
	}

	rcu_read_lock();

	if (reclaim) {
		struct mem_cgroup_per_node *mz;

		mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
		iter = &mz->iter;

		if (prev && reclaim->generation != iter->generation)
			goto out_unlock;

		while (1) {
			pos = READ_ONCE(iter->position);
			if (!pos || css_tryget(&pos->css))
				break;
			/*
			 * css reference reached zero, so iter->position will
			 * be cleared by ->css_released. However, we should not
			 * rely on this happening soon, because ->css_released
			 * is called from a work queue, and by busy-waiting we
			 * might block it. So we clear iter->position right
			 * away.
			 */
			(void)cmpxchg(&iter->position, pos, NULL);
		}
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
		}

		/*
		 * Verify the css and acquire a reference.  The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		memcg = mem_cgroup_from_css(css);

		if (css == &root->css)
			break;

		if (css_tryget(css))
			break;

		memcg = NULL;
	}

	if (reclaim) {
		/*
		 * The position could have already been updated by a competing
		 * thread, so check that the value hasn't changed since we read
		 * it to avoid reclaiming from the same cgroup twice.
		 */
		(void)cmpxchg(&iter->position, pos, memcg);

		if (pos)
			css_put(&pos->css);

		if (!memcg)
			iter->generation++;
		else if (!prev)
			reclaim->generation = iter->generation;
	}

out_unlock:
	rcu_read_unlock();
out:
	if (prev && prev != root)
		css_put(&prev->css);

	return memcg;
}

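/*
 * Illustrative sketch, not part of the original source: shared reclaim
 * walks pass a cookie so that concurrent reclaimers on the same node
 * continue from where the previous one stopped; see
 * mem_cgroup_soft_reclaim() below for a real user:
 *
 *	struct mem_cgroup_reclaim_cookie reclaim = { .pgdat = pgdat };
 *	struct mem_cgroup *memcg = NULL;
 *
 *	while ((memcg = mem_cgroup_iter(root, memcg, &reclaim)))
 *		shrink_one(memcg);	-- hypothetical helper
 */
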
/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
					struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup_reclaim_iter *iter;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = mem_cgroup_nodeinfo(from, nid);
		iter = &mz->iter;
		cmpxchg(&iter->position, dead_memcg, NULL);
	}
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup *memcg = dead_memcg;
	struct mem_cgroup *last;

	do {
		__invalidate_reclaim_iterators(memcg, dead_memcg);
		last = memcg;
	} while ((memcg = parent_mem_cgroup(memcg)));

	/*
	 * When cgroup1 non-hierarchy mode is used,
	 * parent_mem_cgroup() does not walk all the way up to the
	 * cgroup root (root_mem_cgroup). So we have to handle
	 * dead_memcg from cgroup root separately.
	 */
	if (last != root_mem_cgroup)
		__invalidate_reclaim_iterators(root_mem_cgroup,
						dead_memcg);
}

/**
 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
 * @memcg: hierarchy root
 * @fn: function to call for each task
 * @arg: argument passed to @fn
 *
 * This function iterates over tasks attached to @memcg or to any of its
 * descendants and calls @fn for each task. If @fn returns a non-zero
 * value, the function breaks the iteration loop and returns the value.
 * Otherwise, it will iterate over all tasks and return 0.
 *
 * This function must not be called for the root memory cgroup.
 */
int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			  int (*fn)(struct task_struct *, void *), void *arg)
{
	struct mem_cgroup *iter;
	int ret = 0;

	BUG_ON(memcg == root_mem_cgroup);

	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
		while (!ret && (task = css_task_iter_next(&it)))
			ret = fn(task, arg);
		css_task_iter_end(&it);
		if (ret) {
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
	}
	return ret;
}

/**
 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
 * @page: the page
 * @pgdat: pgdat of the page
 *
 * This function relies on page->mem_cgroup being stable - see the
 * access rules in commit_charge().
 */
struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->__lruvec;
		goto out;
	}

	memcg = page->mem_cgroup;
	/*
	 * Swapcache readahead pages are added to the LRU - and
	 * possibly migrated - before they are charged.
	 */
	if (!memcg)
		memcg = root_mem_cgroup;

	mz = mem_cgroup_page_nodeinfo(memcg, page);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

/**
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @zid: zone id of the accounted pages
 * @nr_pages: positive when adding or negative when removing
 *
 * This function must be called under lru_lock, just before a page is added
 * to or just after a page is removed from an lru list (that ordering being
 * so as to allow it to check that lru_size 0 is consistent with list_empty).
 */
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages)
{
	struct mem_cgroup_per_node *mz;
	unsigned long *lru_size;
	long size;

	if (mem_cgroup_disabled())
		return;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	lru_size = &mz->lru_zone_size[zid][lru];

	if (nr_pages < 0)
		*lru_size += nr_pages;

	size = *lru_size;
	if (WARN_ONCE(size < 0,
		"%s(%p, %d, %d): lru_size %ld\n",
		__func__, lruvec, lru, nr_pages, size)) {
		VM_BUG_ON(1);
		*lru_size = 0;
	}

	if (nr_pages > 0)
		*lru_size += nr_pages;
}

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @memcg can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
	unsigned long margin = 0;
	unsigned long count;
	unsigned long limit;

	count = page_counter_read(&memcg->memory);
	limit = READ_ONCE(memcg->memory.max);
	if (count < limit)
		margin = limit - count;

	if (do_memsw_account()) {
		count = page_counter_read(&memcg->memsw);
		limit = READ_ONCE(memcg->memsw.max);
		if (count < limit)
			margin = min(margin, limit - count);
		else
			margin = 0;
	}

	return margin;
}

/*
 * A routine for checking whether "mem" is under move_account() or not.
 *
 * Checks whether a cgroup is mc.from, mc.to, or under the hierarchy of
 * moving cgroups. This is for waiting at high memory pressure
 * caused by "move".
 */
static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	bool ret = false;
	/*
	 * Unlike task_move routines, we access mc.to, mc.from not under
	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;

	ret = mem_cgroup_is_descendant(from, memcg) ||
		mem_cgroup_is_descendant(to, memcg);
unlock:
	spin_unlock(&mc.lock);
	return ret;
}

static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
{
	if (mc.moving_task && current != mc.moving_task) {
		if (mem_cgroup_under_move(memcg)) {
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}

struct memory_stat {
	const char *name;
	unsigned int ratio;
	unsigned int idx;
};

static struct memory_stat memory_stats[] = {
	{ "anon", PAGE_SIZE, NR_ANON_MAPPED },
	{ "file", PAGE_SIZE, NR_FILE_PAGES },
	{ "kernel_stack", 1024, NR_KERNEL_STACK_KB },
	{ "percpu", 1, MEMCG_PERCPU_B },
	{ "sock", PAGE_SIZE, MEMCG_SOCK },
	{ "shmem", PAGE_SIZE, NR_SHMEM },
	{ "file_mapped", PAGE_SIZE, NR_FILE_MAPPED },
	{ "file_dirty", PAGE_SIZE, NR_FILE_DIRTY },
	{ "file_writeback", PAGE_SIZE, NR_WRITEBACK },
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	/*
	 * The ratio will be initialized in memory_stats_init(), because
	 * on some architectures the HPAGE_PMD_SIZE macro is not a
	 * compile-time constant (e.g. powerpc).
	 */
	{ "anon_thp", 0, NR_ANON_THPS },
	{ "file_thp", 0, NR_FILE_THPS },
	{ "shmem_thp", 0, NR_SHMEM_THPS },
#endif
	{ "inactive_anon", PAGE_SIZE, NR_INACTIVE_ANON },
	{ "active_anon", PAGE_SIZE, NR_ACTIVE_ANON },
	{ "inactive_file", PAGE_SIZE, NR_INACTIVE_FILE },
	{ "active_file", PAGE_SIZE, NR_ACTIVE_FILE },
	{ "unevictable", PAGE_SIZE, NR_UNEVICTABLE },

	/*
	 * Note: The slab_reclaimable and slab_unreclaimable must be
	 * together and slab_reclaimable must be in front.
	 */
	{ "slab_reclaimable", 1, NR_SLAB_RECLAIMABLE_B },
	{ "slab_unreclaimable", 1, NR_SLAB_UNRECLAIMABLE_B },

	/* The memory events */
	{ "workingset_refault_anon", 1, WORKINGSET_REFAULT_ANON },
	{ "workingset_refault_file", 1, WORKINGSET_REFAULT_FILE },
	{ "workingset_activate_anon", 1, WORKINGSET_ACTIVATE_ANON },
	{ "workingset_activate_file", 1, WORKINGSET_ACTIVATE_FILE },
	{ "workingset_restore_anon", 1, WORKINGSET_RESTORE_ANON },
	{ "workingset_restore_file", 1, WORKINGSET_RESTORE_FILE },
	{ "workingset_nodereclaim", 1, WORKINGSET_NODERECLAIM },
};

static int __init memory_stats_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		if (memory_stats[i].idx == NR_ANON_THPS ||
		    memory_stats[i].idx == NR_FILE_THPS ||
		    memory_stats[i].idx == NR_SHMEM_THPS)
			memory_stats[i].ratio = HPAGE_PMD_SIZE;
#endif
		VM_BUG_ON(!memory_stats[i].ratio);
		VM_BUG_ON(memory_stats[i].idx >= MEMCG_NR_STAT);
	}

	return 0;
}
pure_initcall(memory_stats_init);

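/*
 * Illustrative note, not part of the original source: each memory_stats[]
 * row above is printed as memcg_page_state(memcg, idx) * ratio bytes, e.g.
 * the "anon" line reports NR_ANON_MAPPED pages scaled by PAGE_SIZE.
 */
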
static char *memory_stat_format(struct mem_cgroup *memcg)
{
	struct seq_buf s;
	int i;

	seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
	if (!s.buffer)
		return NULL;

	/*
	 * Provide statistics on the state of the memory subsystem as
	 * well as cumulative event counters that show past behavior.
	 *
	 * This list is ordered following a combination of these gradients:
	 * 1) generic big picture -> specifics and details
	 * 2) reflecting userspace activity -> reflecting kernel heuristics
	 *
	 * Current memory state:
	 */

	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
		u64 size;

		size = memcg_page_state(memcg, memory_stats[i].idx);
		size *= memory_stats[i].ratio;
		seq_buf_printf(&s, "%s %llu\n", memory_stats[i].name, size);

		if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
			size = memcg_page_state(memcg, NR_SLAB_RECLAIMABLE_B) +
			       memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE_B);
			seq_buf_printf(&s, "slab %llu\n", size);
		}
	}

	/* Accumulated memory events */

	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGFAULT),
		       memcg_events(memcg, PGFAULT));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGMAJFAULT),
		       memcg_events(memcg, PGMAJFAULT));
	seq_buf_printf(&s, "%s %lu\n",  vm_event_name(PGREFILL),
		       memcg_events(memcg, PGREFILL));
	seq_buf_printf(&s, "pgscan %lu\n",
		       memcg_events(memcg, PGSCAN_KSWAPD) +
		       memcg_events(memcg, PGSCAN_DIRECT));
	seq_buf_printf(&s, "pgsteal %lu\n",
		       memcg_events(memcg, PGSTEAL_KSWAPD) +
		       memcg_events(memcg, PGSTEAL_DIRECT));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGACTIVATE),
		       memcg_events(memcg, PGACTIVATE));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGDEACTIVATE),
		       memcg_events(memcg, PGDEACTIVATE));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREE),
		       memcg_events(memcg, PGLAZYFREE));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREED),
		       memcg_events(memcg, PGLAZYFREED));

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_FAULT_ALLOC),
		       memcg_events(memcg, THP_FAULT_ALLOC));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_COLLAPSE_ALLOC),
		       memcg_events(memcg, THP_COLLAPSE_ALLOC));
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

	/* The above should easily fit into one page */
	WARN_ON_ONCE(seq_buf_has_overflowed(&s));

	return s.buffer;
}

#define K(x) ((x) << (PAGE_SHIFT-10))
/**
 * mem_cgroup_print_oom_context: Print OOM information relevant to
 * memory controller.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
	rcu_read_lock();

	if (memcg) {
		pr_cont(",oom_memcg=");
		pr_cont_cgroup_path(memcg->css.cgroup);
	} else
		pr_cont(",global_oom");
	if (p) {
		pr_cont(",task_memcg=");
		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
	}
	rcu_read_unlock();
}

/**
 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
 * memory controller.
 * @memcg: The memory cgroup that went over limit
 */
void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
	char *buf;

	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memory)),
		K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->swap)),
			K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
	else {
		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->memsw)),
			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->kmem)),
			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
	}

	pr_info("Memory cgroup stats for ");
	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont(":");
	buf = memory_stat_format(memcg);
	if (!buf)
		return;
	pr_info("%s", buf);
	kfree(buf);
}

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	unsigned long max = READ_ONCE(memcg->memory.max);

	if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
		if (mem_cgroup_swappiness(memcg))
			max += min(READ_ONCE(memcg->swap.max),
				   (unsigned long)total_swap_pages);
	} else { /* v1 */
		if (mem_cgroup_swappiness(memcg)) {
			/* Calculate swap excess capacity from memsw limit */
			unsigned long swap = READ_ONCE(memcg->memsw.max) - max;

			max += min(swap, (unsigned long)total_swap_pages);
		}
	}
	return max;
}

unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return page_counter_read(&memcg->memory);
}

static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = memcg,
		.gfp_mask = gfp_mask,
		.order = order,
	};
	bool ret = true;

	if (mutex_lock_killable(&oom_lock))
		return true;

	if (mem_cgroup_margin(memcg) >= (1 << order))
		goto unlock;

	/*
	 * A few threads which were not waiting at mutex_lock_killable() can
	 * fail to bail out. Therefore, check again after holding oom_lock.
	 */
	ret = should_force_charge() || out_of_memory(&oc);

unlock:
	mutex_unlock(&oom_lock);
	return ret;
}

static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
				   pg_data_t *pgdat,
				   gfp_t gfp_mask,
				   unsigned long *total_scanned)
{
	struct mem_cgroup *victim = NULL;
	int total = 0;
	int loop = 0;
	unsigned long excess;
	unsigned long nr_scanned;
	struct mem_cgroup_reclaim_cookie reclaim = {
		.pgdat = pgdat,
	};

	excess = soft_limit_excess(root_memcg);

	while (1) {
		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
		if (!victim) {
			loop++;
			if (loop >= 2) {
				/*
				 * If we have not been able to reclaim
				 * anything, it might be because there are
				 * no reclaimable pages under this hierarchy.
				 */
				if (!total)
					break;
				/*
				 * We want to do more targeted reclaim.
				 * excess >> 2 is not too excessive, so as not
				 * to reclaim too much, nor so little that we
				 * keep coming back to reclaim from this cgroup.
				 */
				if (total >= (excess >> 2) ||
					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
					break;
			}
			continue;
		}
		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
					pgdat, &nr_scanned);
		*total_scanned += nr_scanned;
		if (!soft_limit_excess(root_memcg))
			break;
	}
	mem_cgroup_iter_break(root_memcg, victim);
	return total;
}

#ifdef CONFIG_LOCKDEP
static struct lockdep_map memcg_oom_lock_dep_map = {
	.name = "memcg_oom_lock",
};
#endif

static DEFINE_SPINLOCK(memcg_oom_lock);

/*
 * Check OOM-Killer is already running under our hierarchy.
 * If someone is running, return false.
 */
static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter, *failed = NULL;

	spin_lock(&memcg_oom_lock);

	for_each_mem_cgroup_tree(iter, memcg) {
		if (iter->oom_lock) {
			/*
			 * this subtree of our hierarchy is already locked
			 * so we cannot take the lock.
			 */
			failed = iter;
			mem_cgroup_iter_break(memcg, iter);
			break;
		} else
			iter->oom_lock = true;
	}

	if (failed) {
		/*
		 * OK, we failed to lock the whole subtree so we have
		 * to clean up what we already set up, up to the failing subtree
		 */
		for_each_mem_cgroup_tree(iter, memcg) {
			if (iter == failed) {
				mem_cgroup_iter_break(memcg, iter);
				break;
			}
			iter->oom_lock = false;
		}
	} else
		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);

	spin_unlock(&memcg_oom_lock);

	return !failed;
}

static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->oom_lock = false;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->under_oom++;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	/*
	 * Be careful about under_oom underflows because a child memcg
	 * could have been added after mem_cgroup_mark_under_oom.
	 */
	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		if (iter->under_oom > 0)
			iter->under_oom--;
	spin_unlock(&memcg_oom_lock);
}

static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);

struct oom_wait_info {
	struct mem_cgroup *memcg;
	wait_queue_entry_t	wait;
};

static int memcg_oom_wake_function(wait_queue_entry_t *wait,
	unsigned mode, int sync, void *arg)
{
	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
	struct mem_cgroup *oom_wait_memcg;
	struct oom_wait_info *oom_wait_info;

	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
	oom_wait_memcg = oom_wait_info->memcg;

	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
		return 0;
	return autoremove_wake_function(wait, mode, sync, arg);
}

static void memcg_oom_recover(struct mem_cgroup *memcg)
{
	/*
	 * For the following lockless ->under_oom test, the only required
	 * guarantee is that it must see the state asserted by an OOM when
	 * this function is called as a result of userland actions
	 * triggered by the notification of the OOM.  This is trivially
	 * achieved by invoking mem_cgroup_mark_under_oom() before
	 * triggering notification.
	 */
	if (memcg && memcg->under_oom)
		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
}

enum oom_status {
	OOM_SUCCESS,
	OOM_FAILED,
	OOM_ASYNC,
	OOM_SKIPPED
};

static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
{
	enum oom_status ret;
	bool locked;

	if (order > PAGE_ALLOC_COSTLY_ORDER)
		return OOM_SKIPPED;

	memcg_memory_event(memcg, MEMCG_OOM);

	/*
	 * We are in the middle of the charge context here, so we
	 * don't want to block when potentially sitting on a callstack
	 * that holds all kinds of filesystem and mm locks.
	 *
	 * cgroup1 allows disabling the OOM killer and waiting for outside
	 * handling until the charge can succeed; remember the context and put
	 * the task to sleep at the end of the page fault when all locks are
	 * released.
	 *
	 * On the other hand, in-kernel OOM killer allows for an async victim
	 * memory reclaim (oom_reaper) and that means that we are not solely
	 * relying on the oom victim to make a forward progress and we can
	 * invoke the oom killer here.
	 *
	 * Please note that mem_cgroup_out_of_memory might fail to find a
	 * victim and then we have to bail out from the charge path.
	 */
	if (memcg->oom_kill_disable) {
		if (!current->in_user_fault)
			return OOM_SKIPPED;
		css_get(&memcg->css);
		current->memcg_in_oom = memcg;
		current->memcg_oom_gfp_mask = mask;
		current->memcg_oom_order = order;

		return OOM_ASYNC;
	}

	mem_cgroup_mark_under_oom(memcg);

	locked = mem_cgroup_oom_trylock(memcg);

	if (locked)
		mem_cgroup_oom_notify(memcg);

	mem_cgroup_unmark_under_oom(memcg);
	if (mem_cgroup_out_of_memory(memcg, mask, order))
		ret = OOM_SUCCESS;
	else
		ret = OOM_FAILED;

	if (locked)
		mem_cgroup_oom_unlock(memcg);

	return ret;
}

/**
 * mem_cgroup_oom_synchronize - complete memcg OOM handling
1988
 * @handle: actually kill/wait or just clean up the OOM state
1989
 *
1990 1991
 * This has to be called at the end of a page fault if the memcg OOM
 * handler was enabled.
1992
 *
1993
 * Memcg supports userspace OOM handling where failed allocations must
 * sleep on a waitqueue until the userspace task resolves the
 * situation.  Sleeping directly in the charge context with all kinds
 * of locks held is not a good idea, instead we remember an OOM state
 * in the task and mem_cgroup_oom_synchronize() has to be called at
1998
 * the end of the page fault to complete the OOM handling.
1999 2000
 *
 * Returns %true if an ongoing memcg OOM situation was detected and
2001
 * completed, %false otherwise.
2002
 */
2003
bool mem_cgroup_oom_synchronize(bool handle)
2004
{
	struct mem_cgroup *memcg = current->memcg_in_oom;
2006
	struct oom_wait_info owait;
2007
	bool locked;

	/* OOM is global, do not handle */
	if (!memcg)
2011
		return false;
2012

2013
	if (!handle)
2014
		goto cleanup;

	owait.memcg = memcg;
	owait.wait.flags = 0;
	owait.wait.func = memcg_oom_wake_function;
	owait.wait.private = current;
2020
	INIT_LIST_HEAD(&owait.wait.entry);

2022
	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
	mem_cgroup_mark_under_oom(memcg);

	locked = mem_cgroup_oom_trylock(memcg);

	if (locked)
		mem_cgroup_oom_notify(memcg);

	if (locked && !memcg->oom_kill_disable) {
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
					 current->memcg_oom_order);
2035
	} else {
2036
		schedule();
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
	}

	if (locked) {
		mem_cgroup_oom_unlock(memcg);
		/*
		 * There is no guarantee that an OOM-lock contender
		 * sees the wakeups triggered by the OOM kill
		 * uncharges.  Wake any sleepers explicitly.
		 */
		memcg_oom_recover(memcg);
	}
cleanup:
	current->memcg_in_oom = NULL;
2052
	css_put(&memcg->css);
	return true;
}
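/*
 * Editor's usage sketch (hedged; the caller lives outside this file): the
 * page-fault exit path is expected to invoke this once all locks are
 * dropped, roughly:
 *
 *	if (unlikely(current->memcg_in_oom))
 *		mem_cgroup_oom_synchronize(true);
 *
 * Passing false instead only cleans up the per-task OOM state without
 * waiting or killing.
 */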

/**
 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
 * @victim: task to be killed by the OOM killer
 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
 *
 * Returns a pointer to a memory cgroup, which has to be cleaned up
 * by killing all belonging OOM-killable tasks.
 *
 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
 */
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain)
{
	struct mem_cgroup *oom_group = NULL;
	struct mem_cgroup *memcg;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return NULL;

	if (!oom_domain)
		oom_domain = root_mem_cgroup;

	rcu_read_lock();

	memcg = mem_cgroup_from_task(victim);
	if (memcg == root_mem_cgroup)
		goto out;

	/*
	 * If the victim task has been asynchronously moved to a different
	 * memory cgroup, we might end up killing tasks outside oom_domain.
	 * In this case it's better to ignore memory.group.oom.
	 */
	if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
		goto out;

	/*
	 * Traverse the memory cgroup hierarchy from the victim task's
	 * cgroup up to the OOMing cgroup (or root) to find the
	 * highest-level memory cgroup with oom.group set.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		if (memcg->oom_group)
			oom_group = memcg;

		if (memcg == oom_domain)
			break;
	}

	if (oom_group)
		css_get(&oom_group->css);
out:
	rcu_read_unlock();

	return oom_group;
}

void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
	pr_info("Tasks in ");
	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont(" are going to be killed due to memory.oom.group set\n");
}

2120
/**
2121 2122
 * lock_page_memcg - lock a page->mem_cgroup binding
 * @page: the page
2123
 *
2124
 * This function protects unlocked LRU pages from being moved to
 * another cgroup.
 *
 * It ensures lifetime of the returned memcg. Caller is responsible
 * for the lifetime of the page; __unlock_page_memcg() is available
 * when @page might get freed inside the locked section.
2130
 */
2131
struct mem_cgroup *lock_page_memcg(struct page *page)
2132
{
2133
	struct page *head = compound_head(page); /* rmap on tail pages */
2134
	struct mem_cgroup *memcg;
2135
	unsigned long flags;
2136

	/*
	 * The RCU lock is held throughout the transaction.  The fast
	 * path can get away without acquiring the memcg->move_lock
	 * because page moving starts with an RCU grace period.
	 *
	 * The RCU lock also protects the memcg from being freed when
	 * the page state that is going to change is the only thing
	 * preventing the page itself from being freed. E.g. writeback
	 * doesn't hold a page reference and relies on PG_writeback to
	 * keep off truncation, migration and so forth.
         */
	rcu_read_lock();

	if (mem_cgroup_disabled())
2151
		return NULL;
2152
again:
2153
	memcg = head->mem_cgroup;
2154
	if (unlikely(!memcg))
2155
		return NULL;
2156

	if (atomic_read(&memcg->moving_account) <= 0)
2158
		return memcg;
2159

2160
	spin_lock_irqsave(&memcg->move_lock, flags);
2161
	if (memcg != head->mem_cgroup) {
2162
		spin_unlock_irqrestore(&memcg->move_lock, flags);
2163 2164
		goto again;
	}

	/*
	 * When charge migration first begins, we can have locked and
	 * unlocked page stat updates happening concurrently.  Track
2169
	 * the task who has the lock for unlock_page_memcg().
	 */
	memcg->move_lock_task = current;
	memcg->move_lock_flags = flags;
2173

2174
	return memcg;
2175
}
2176
EXPORT_SYMBOL(lock_page_memcg);
2177

2178
/**
 * __unlock_page_memcg - unlock and unpin a memcg
 * @memcg: the memcg
 *
 * Unlock and unpin a memcg returned by lock_page_memcg().
2183
 */
2184
void __unlock_page_memcg(struct mem_cgroup *memcg)
2185
{
	if (memcg && memcg->move_lock_task == current) {
		unsigned long flags = memcg->move_lock_flags;

		memcg->move_lock_task = NULL;
		memcg->move_lock_flags = 0;

		spin_unlock_irqrestore(&memcg->move_lock, flags);
	}
2194

2195
	rcu_read_unlock();
2196
}

/**
 * unlock_page_memcg - unlock a page->mem_cgroup binding
 * @page: the page
 */
void unlock_page_memcg(struct page *page)
{
	struct page *head = compound_head(page);

	__unlock_page_memcg(head->mem_cgroup);
}
EXPORT_SYMBOL(unlock_page_memcg);
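/*
 * Editor's usage sketch (assumption, not taken from a specific caller): a
 * typical user brackets unlocked page-state updates like this:
 *
 *	struct mem_cgroup *memcg = lock_page_memcg(page);
 *	// ... update page state that feeds memcg statistics ...
 *	if (page_still_valid)
 *		unlock_page_memcg(page);
 *	else
 *		__unlock_page_memcg(memcg);
 *
 * where "page_still_valid" stands in for whatever guarantee the caller
 * has that the page was not freed inside the locked section.
 */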

struct memcg_stock_pcp {
	struct mem_cgroup *cached; /* this is never the root cgroup */
2212
	unsigned int nr_pages;

#ifdef CONFIG_MEMCG_KMEM
	struct obj_cgroup *cached_objcg;
	unsigned int nr_bytes;
#endif

2219
	struct work_struct work;
2220
	unsigned long flags;
2221
#define FLUSHING_CACHED_CHARGE	0
2222 2223
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
2224
static DEFINE_MUTEX(percpu_charge_mutex);
2225

#ifdef CONFIG_MEMCG_KMEM
static void drain_obj_stock(struct memcg_stock_pcp *stock);
static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
				     struct mem_cgroup *root_memcg);

#else
static inline void drain_obj_stock(struct memcg_stock_pcp *stock)
{
}
static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
				     struct mem_cgroup *root_memcg)
{
	return false;
}
#endif

/**
 * consume_stock: Try to consume stocked charge on this cpu.
 * @memcg: memcg to consume from.
 * @nr_pages: how many pages to charge.
 *
 * The charges will only happen if @memcg matches the current cpu's memcg
 * stock, and at least @nr_pages are available in that stock.  Failure to
 * service an allocation will refill the stock.
 *
 * returns true if successful, false otherwise.
2252
 */
2253
static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2254 2255
{
	struct memcg_stock_pcp *stock;
2256
	unsigned long flags;
2257
	bool ret = false;
2258

2259
	if (nr_pages > MEMCG_CHARGE_BATCH)
2260
		return ret;
2261

	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
2265
	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
2266
		stock->nr_pages -= nr_pages;
2267 2268
		ret = true;
	}

	local_irq_restore(flags);

	return ret;
}

/*
 * Returns stocks cached in percpu and resets the cached information.
 */
static void drain_stock(struct memcg_stock_pcp *stock)
{
	struct mem_cgroup *old = stock->cached;

	if (!old)
		return;

2285
	if (stock->nr_pages) {
2286
		page_counter_uncharge(&old->memory, stock->nr_pages);
2287
		if (do_memsw_account())
2288
			page_counter_uncharge(&old->memsw, stock->nr_pages);
2289
		stock->nr_pages = 0;
2290
	}
2291 2292

	css_put(&old->css);
	stock->cached = NULL;
}

static void drain_local_stock(struct work_struct *dummy)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;

	/*
	 * The only protection from memory hotplug vs. drain_stock races is
	 * that we always operate on local CPU stock here with IRQ disabled
	 */
	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
	drain_obj_stock(stock);
2309
	drain_stock(stock);
2310
	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2311 2312

	local_irq_restore(flags);
2313 2314 2315
}

/*
 * Cache charges(nr_pages) to the local per_cpu area.
 * This will be consumed by consume_stock() function, later.
 */
2319
static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2320
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;

	local_irq_save(flags);
2325

2326
	stock = this_cpu_ptr(&memcg_stock);
2327
	if (stock->cached != memcg) { /* reset if necessary */
2328
		drain_stock(stock);
2329
		css_get(&memcg->css);
2330
		stock->cached = memcg;
2331
	}
2332
	stock->nr_pages += nr_pages;
2333

2334
	if (stock->nr_pages > MEMCG_CHARGE_BATCH)
		drain_stock(stock);

	local_irq_restore(flags);
}
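/*
 * Editor's note (sketch): the charge fast path pairs these helpers --
 * consume_stock() first tries to take pages from the local CPU cache, and
 * when try_charge() has charged a full MEMCG_CHARGE_BATCH for a smaller
 * request, the surplus is handed back via refill_stock(), e.g.:
 *
 *	if (batch > nr_pages)
 *		refill_stock(memcg, batch - nr_pages);
 */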

/*
2341
 * Drains all per-CPU charge caches for given root_memcg resp. subtree
2342
 * of the hierarchy under it.
2343
 */
2344
static void drain_all_stock(struct mem_cgroup *root_memcg)
2345
{
2346
	int cpu, curcpu;
2347

	/* If someone's already draining, avoid adding more workers. */
	if (!mutex_trylock(&percpu_charge_mutex))
		return;
	/*
	 * Notify other cpus that system-wide "drain" is running
	 * We do not care about races with the cpu hotplug because cpu down
	 * as well as workers from this path always operate on the local
	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
	 */
2357
	curcpu = get_cpu();
2358 2359
	for_each_online_cpu(cpu) {
		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2360
		struct mem_cgroup *memcg;
2361
		bool flush = false;
2362

2363
		rcu_read_lock();
2364
		memcg = stock->cached;
		if (memcg && stock->nr_pages &&
		    mem_cgroup_is_descendant(memcg, root_memcg))
			flush = true;
		if (obj_stock_flush_required(stock, root_memcg))
			flush = true;
		rcu_read_unlock();

		if (flush &&
		    !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
			if (cpu == curcpu)
				drain_local_stock(&stock->work);
			else
				schedule_work_on(cpu, &stock->work);
		}
2379
	}
2380
	put_cpu();
2381
	mutex_unlock(&percpu_charge_mutex);
2382 2383
}

2384
static int memcg_hotplug_cpu_dead(unsigned int cpu)
2385 2386
{
	struct memcg_stock_pcp *stock;
2387
	struct mem_cgroup *memcg, *mi;

	stock = &per_cpu(memcg_stock, cpu);
	drain_stock(stock);

	for_each_mem_cgroup(memcg) {
		int i;

		for (i = 0; i < MEMCG_NR_STAT; i++) {
			int nid;
			long x;

2399
			x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0);
2400
			if (x)
2401 2402
				for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
					atomic_long_add(x, &memcg->vmstats[i]);

			if (i >= NR_VM_NODE_STAT_ITEMS)
				continue;

			for_each_node(nid) {
				struct mem_cgroup_per_node *pn;

				pn = mem_cgroup_nodeinfo(memcg, nid);
				x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0);
2412
				if (x)
					do {
						atomic_long_add(x, &pn->lruvec_stat[i]);
					} while ((pn = parent_nodeinfo(pn, nid)));
			}
		}

2419
		for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
2420 2421
			long x;

2422
			x = this_cpu_xchg(memcg->vmstats_percpu->events[i], 0);
2423
			if (x)
2424 2425
				for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
					atomic_long_add(x, &memcg->vmevents[i]);
		}
	}

	return 0;
}
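/*
 * Editor's sketch (assumption about the registration site elsewhere in
 * this file): the callback above is wired into the CPU hotplug state
 * machine from the memcg init code, along the lines of:
 *
 *	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead",
 *				  NULL, memcg_hotplug_cpu_dead);
 *
 * so a dying CPU first drains its stock and then folds its per-cpu
 * counters into the hierarchy.
 */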

static unsigned long reclaim_high(struct mem_cgroup *memcg,
				  unsigned int nr_pages,
				  gfp_t gfp_mask)
2435
{
2436 2437
	unsigned long nr_reclaimed = 0;

2438
	do {
2439 2440
		unsigned long pflags;

2441 2442
		if (page_counter_read(&memcg->memory) <=
		    READ_ONCE(memcg->memory.high))
2443
			continue;
2444

2445
		memcg_memory_event(memcg, MEMCG_HIGH);
2446 2447

		psi_memstall_enter(&pflags);
2448 2449
		nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
							     gfp_mask, true);
2450
		psi_memstall_leave(&pflags);
2451 2452
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));
2453 2454

	return nr_reclaimed;
}

static void high_work_func(struct work_struct *work)
{
	struct mem_cgroup *memcg;

	memcg = container_of(work, struct mem_cgroup, high_work);
2462
	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2463 2464
}

/*
 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
 * enough to still cause a significant slowdown in most cases, while still
 * allowing diagnostics and tracing to proceed without becoming stuck.
 */
#define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)

/*
 * When calculating the delay, we use these on either side of the exponentiation to
 * maintain precision and scale to a reasonable number of jiffies (see the table
 * below).
 *
 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
 *   overage ratio to a delay.
 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
 *   proposed penalty in order to reduce to a reasonable number of jiffies, and
 *   to produce a reasonable delay curve.
 *
 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
 * reasonable delay curve compared to precision-adjusted overage, not
 * penalising heavily at first, but still making sure that growth beyond the
 * limit penalises misbehaving cgroups by slowing them down exponentially. For
 * example, with a high of 100 megabytes:
 *
 *  +-------+------------------------+
 *  | usage | time to allocate in ms |
 *  +-------+------------------------+
 *  | 100M  |                      0 |
 *  | 101M  |                      6 |
 *  | 102M  |                     25 |
 *  | 103M  |                     57 |
 *  | 104M  |                    102 |
 *  | 105M  |                    159 |
 *  | 106M  |                    230 |
 *  | 107M  |                    313 |
 *  | 108M  |                    409 |
 *  | 109M  |                    518 |
 *  | 110M  |                    639 |
 *  | 111M  |                    774 |
 *  | 112M  |                    921 |
 *  | 113M  |                   1081 |
 *  | 114M  |                   1254 |
 *  | 115M  |                   1439 |
 *  | 116M  |                   1638 |
 *  | 117M  |                   1849 |
 *  | 118M  |                   2000 |
 *  | 119M  |                   2000 |
 *  | 120M  |                   2000 |
 *  +-------+------------------------+
 */
#define MEMCG_DELAY_PRECISION_SHIFT 20
#define MEMCG_DELAY_SCALING_SHIFT 14

static u64 calculate_overage(unsigned long usage, unsigned long high)
2519
{
2520
	u64 overage;
2521

2522 2523
	if (usage <= high)
		return 0;
2524

	/*
	 * Prevent division by 0 in overage calculation by acting as if
	 * it was a threshold of 1 page
	 */
	high = max(high, 1UL);
2530

	overage = usage - high;
	overage <<= MEMCG_DELAY_PRECISION_SHIFT;
	return div64_u64(overage, high);
}
2535

static u64 mem_find_max_overage(struct mem_cgroup *memcg)
{
	u64 overage, max_overage = 0;
2539

2540 2541
	do {
		overage = calculate_overage(page_counter_read(&memcg->memory),
2542
					    READ_ONCE(memcg->memory.high));
2543
		max_overage = max(overage, max_overage);
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));

	return max_overage;
}

static u64 swap_find_max_overage(struct mem_cgroup *memcg)
{
	u64 overage, max_overage = 0;

	do {
		overage = calculate_overage(page_counter_read(&memcg->swap),
					    READ_ONCE(memcg->swap.high));
		if (overage)
			memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
		max_overage = max(overage, max_overage);
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));

	return max_overage;
}

/*
 * Get the number of jiffies that we should penalise a mischievous cgroup which
 * is exceeding its memory.high by checking both it and its ancestors.
 */
static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
					  unsigned int nr_pages,
					  u64 max_overage)
{
	unsigned long penalty_jiffies;

2576 2577
	if (!max_overage)
		return 0;

	/*
	 * We use overage compared to memory.high to calculate the number of
	 * jiffies to sleep (penalty_jiffies). Ideally this value should be
	 * fairly lenient on small overages, and increasingly harsh when the
	 * memcg in question makes it clear that it has no intention of stopping
	 * its crazy behaviour, so we exponentially increase the delay based on
	 * overage amount.
	 */
	penalty_jiffies = max_overage * max_overage * HZ;
	penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
	penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;

	/*
	 * Factor in the task's own contribution to the overage, such that four
	 * N-sized allocations are throttled approximately the same as one
	 * 4N-sized allocation.
	 *
	 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
	 * larger the current charge batch is than that.
	 */
	return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
}
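/*
 * Editor's worked example (illustrative only, HZ = 1000 assumed): with
 * memory.high = 100M and usage = 104M on 4K pages, calculate_overage()
 * yields (1024 << 20) / 25600 ~= 41943. Then
 * 41943 * 41943 * 1000 >> 20 >> 14 ~= 102 jiffies, i.e. roughly 102ms for
 * a full MEMCG_CHARGE_BATCH, matching the 104M row of the table above.
 */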

/*
 * Scheduled by try_charge() to be executed from the userland return path
 * and reclaims memory over the high limit.
 */
void mem_cgroup_handle_over_high(void)
{
	unsigned long penalty_jiffies;
	unsigned long pflags;
2610
	unsigned long nr_reclaimed;
2611
	unsigned int nr_pages = current->memcg_nr_pages_over_high;
2612
	int nr_retries = MAX_RECLAIM_RETRIES;
2613
	struct mem_cgroup *memcg;
2614
	bool in_retry = false;

	if (likely(!nr_pages))
		return;

	memcg = get_mem_cgroup_from_mm(current->mm);
	current->memcg_nr_pages_over_high = 0;

retry_reclaim:
	/*
	 * The allocating task should reclaim at least the batch size, but for
	 * subsequent retries we only want to do what's necessary to prevent oom
	 * or breaching resource isolation.
	 *
	 * This is distinct from memory.max or page allocator behaviour because
	 * memory.high is currently batched, whereas memory.max and the page
	 * allocator run every time an allocation is made.
	 */
	nr_reclaimed = reclaim_high(memcg,
				    in_retry ? SWAP_CLUSTER_MAX : nr_pages,
				    GFP_KERNEL);

	/*
	 * memory.high is breached and reclaim is unable to keep up. Throttle
	 * allocators proactively to slow down excessive growth.
	 */
2640 2641
	penalty_jiffies = calculate_high_delay(memcg, nr_pages,
					       mem_find_max_overage(memcg));
2642

	penalty_jiffies += calculate_high_delay(memcg, nr_pages,
						swap_find_max_overage(memcg));

	/*
	 * Clamp the max delay per usermode return so as to still keep the
	 * application moving forwards and also permit diagnostics, albeit
	 * extremely slowly.
	 */
	penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);

	/*
	 * Don't sleep if the amount of jiffies this memcg owes us is so low
	 * that it's not even worth doing, in an attempt to be nice to those who
	 * go only a small amount over their memory.high value and maybe haven't
	 * been aggressively reclaimed enough yet.
	 */
	if (penalty_jiffies <= HZ / 100)
		goto out;

	/*
	 * If reclaim is making forward progress but we're still over
	 * memory.high, we want to encourage that rather than doing allocator
	 * throttling.
	 */
	if (nr_reclaimed || nr_retries--) {
		in_retry = true;
		goto retry_reclaim;
	}

	/*
	 * If we exit early, we're guaranteed to die (since
	 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
	 * need to account for any ill-begotten jiffies to pay them off later.
	 */
	psi_memstall_enter(&pflags);
	schedule_timeout_killable(penalty_jiffies);
	psi_memstall_leave(&pflags);

out:
	css_put(&memcg->css);
2683 2684
}

2685 2686
static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
		      unsigned int nr_pages)
2687
{
2688
	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2689
	int nr_retries = MAX_RECLAIM_RETRIES;
2690
	struct mem_cgroup *mem_over_limit;
2691
	struct page_counter *counter;
2692
	enum oom_status oom_status;
2693
	unsigned long nr_reclaimed;
2694 2695
	bool may_swap = true;
	bool drained = false;
2696
	unsigned long pflags;
2697

2698
	if (mem_cgroup_is_root(memcg))
2699
		return 0;
2700
retry:
2701
	if (consume_stock(memcg, nr_pages))
2702
		return 0;
2703

2704
	if (!do_memsw_account() ||
2705 2706
	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
		if (page_counter_try_charge(&memcg->memory, batch, &counter))
2707
			goto done_restock;
2708
		if (do_memsw_account())
2709 2710
			page_counter_uncharge(&memcg->memsw, batch);
		mem_over_limit = mem_cgroup_from_counter(counter, memory);
2711
	} else {
2712
		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2713
		may_swap = false;
2714
	}
2715

2716 2717 2718 2719
	if (batch > nr_pages) {
		batch = nr_pages;
		goto retry;
	}
2720

2721 2722 2723 2724 2725 2726 2727 2728 2729
	/*
	 * Memcg doesn't have a dedicated reserve for atomic
	 * allocations. But like the global atomic pool, we need to
	 * put the burden of reclaim on regular allocation requests
	 * and let these go through as privileged allocations.
	 */
	if (gfp_mask & __GFP_ATOMIC)
		goto force;

2730 2731 2732 2733 2734 2735
	/*
	 * Unlike in global OOM situations, memcg is not in a physical
	 * memory shortage.  Allow dying and OOM-killed tasks to
	 * bypass the last charges so that they can exit quickly and
	 * free their memory.
	 */
2736
	if (unlikely(should_force_charge()))
2737
		goto force;
2738

2739 2740 2741 2742 2743 2744 2745 2746 2747
	/*
	 * Prevent unbounded recursion when reclaim operations need to
	 * allocate memory. This might exceed the limits temporarily,
	 * but we prefer facilitating memory reclaim and getting back
	 * under the limit over triggering OOM kills in these cases.
	 */
	if (unlikely(current->flags & PF_MEMALLOC))
		goto force;

	if (unlikely(task_in_memcg_oom(current)))
		goto nomem;

2751
	if (!gfpflags_allow_blocking(gfp_mask))
2752
		goto nomem;
2753

2754
	memcg_memory_event(mem_over_limit, MEMCG_MAX);
2755

2756
	psi_memstall_enter(&pflags);
2757 2758
	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
						    gfp_mask, may_swap);
2759
	psi_memstall_leave(&pflags);
2760

2761
	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2762
		goto retry;
2763

2764
	if (!drained) {
2765
		drain_all_stock(mem_over_limit);
		drained = true;
		goto retry;
	}

2770 2771
	if (gfp_mask & __GFP_NORETRY)
		goto nomem;
	/*
	 * Even though the limit is exceeded at this point, reclaim
	 * may have been able to free some pages.  Retry the charge
	 * before killing the task.
	 *
	 * Only for regular pages, though: huge pages are rather
	 * unlikely to succeed so close to the limit, and we fall back
	 * to regular pages anyway in case of failure.
	 */
2781
	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
		goto retry;
	/*
	 * At task move, charge accounts can be doubly counted. So, it's
	 * better to wait until the end of task_move if something is going on.
	 */
	if (mem_cgroup_wait_acct_move(mem_over_limit))
		goto retry;

	if (nr_retries--)
		goto retry;

2793
	if (gfp_mask & __GFP_RETRY_MAYFAIL)
2794 2795
		goto nomem;

2796
	if (gfp_mask & __GFP_NOFAIL)
2797
		goto force;
2798

2799
	if (fatal_signal_pending(current))
2800
		goto force;
2801

	/*
	 * keep retrying as long as the memcg oom killer is able to make
	 * a forward progress or bypass the charge if the oom killer
	 * couldn't make any progress.
	 */
	oom_status = mem_cgroup_oom(mem_over_limit, gfp_mask,
2808
		       get_order(nr_pages * PAGE_SIZE));
	switch (oom_status) {
	case OOM_SUCCESS:
2811
		nr_retries = MAX_RECLAIM_RETRIES;
		goto retry;
	case OOM_FAILED:
		goto force;
	default:
		goto nomem;
	}
2818
nomem:
2819
	if (!(gfp_mask & __GFP_NOFAIL))
2820
		return -ENOMEM;
force:
	/*
	 * The allocation either can't fail or will lead to more memory
	 * being freed very soon.  Allow memory usage to go over the limit
	 * temporarily by force charging it.
	 */
	page_counter_charge(&memcg->memory, nr_pages);
2828
	if (do_memsw_account())
2829 2830 2831
		page_counter_charge(&memcg->memsw, nr_pages);

	return 0;

done_restock:
	if (batch > nr_pages)
		refill_stock(memcg, batch - nr_pages);
2836

2837
	/*
2838 2839
	 * If the hierarchy is above the normal consumption range, schedule
	 * reclaim on returning to userland.  We can perform reclaim here
2840
	 * if __GFP_RECLAIM but let's always punt for simplicity and so that
	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
	 * not recorded as it most likely matches current's and won't
	 * change in the meantime.  As high limit is checked again before
	 * reclaim, the cost of mismatch is negligible.
	 */
	do {
		bool mem_high, swap_high;

		mem_high = page_counter_read(&memcg->memory) >
			READ_ONCE(memcg->memory.high);
		swap_high = page_counter_read(&memcg->swap) >
			READ_ONCE(memcg->swap.high);

		/* Don't bother a random interrupted task */
		if (in_interrupt()) {
			if (mem_high) {
				schedule_work(&memcg->high_work);
				break;
			}
			continue;
		}

		if (mem_high || swap_high) {
			/*
			 * The allocating tasks in this cgroup will need to do
			 * reclaim or be throttled to prevent further growth
			 * of the memory or swap footprints.
			 *
			 * Target some best-effort fairness between the tasks,
			 * and distribute reclaim work and delay penalties
			 * based on how much each task is actually allocating.
			 */
			current->memcg_nr_pages_over_high += batch;
2874 2875 2876
			set_notify_resume(current);
			break;
		}
2877
	} while ((memcg = parent_mem_cgroup(memcg)));

	return 0;
}
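/*
 * Editor's sketch of a typical caller (hedged; it mirrors the charging
 * paths elsewhere in this file rather than defining a new API):
 *
 *	ret = try_charge(memcg, gfp_mask, nr_pages);
 *	if (!ret)
 *		commit_charge(page, memcg);	// bind page->mem_cgroup
 *
 * i.e. try_charge() only updates the page counters; binding the page to
 * the memcg is a separate commit step.
 */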

2882
#if defined(CONFIG_MEMCG_KMEM) || defined(CONFIG_MMU)
2883
static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2884
{
	if (mem_cgroup_is_root(memcg))
		return;

2888
	page_counter_uncharge(&memcg->memory, nr_pages);
2889
	if (do_memsw_account())
2890
		page_counter_uncharge(&memcg->memsw, nr_pages);
2891
}
2892
#endif
2893

2894
static void commit_charge(struct page *page, struct mem_cgroup *memcg)
2895
{
2896
	VM_BUG_ON_PAGE(page->mem_cgroup, page);
2897
	/*
2898
	 * Any of the following ensures page->mem_cgroup stability:
2899
	 *
	 * - the page lock
	 * - LRU isolation
	 * - lock_page_memcg()
	 * - exclusive reference
2904
	 */
2905
	page->mem_cgroup = memcg;
2906
}
2907

2908
#ifdef CONFIG_MEMCG_KMEM
int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
				 gfp_t gfp)
{
	unsigned int objects = objs_per_slab_page(s, page);
	void *vec;

	vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
			   page_to_nid(page));
	if (!vec)
		return -ENOMEM;

	if (cmpxchg(&page->obj_cgroups, NULL,
		    (struct obj_cgroup **) ((unsigned long)vec | 0x1UL)))
		kfree(vec);
	else
		kmemleak_not_leak(vec);

	return 0;
}
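/*
 * Editor's note (sketch): the low bit set on page->obj_cgroups above is
 * how readers tell an obj_cgroup vector apart from a plain memcg pointer;
 * decoding is essentially:
 *
 *	struct obj_cgroup **vec =
 *		(struct obj_cgroup **)((unsigned long)page->obj_cgroups & ~0x1UL);
 *
 * (see mem_cgroup_from_obj() below for the reader side).
 */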

/*
 * Returns a pointer to the memory cgroup to which the kernel object is charged.
 *
 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
 * cgroup_mutex, etc.
 */
struct mem_cgroup *mem_cgroup_from_obj(void *p)
{
	struct page *page;

	if (mem_cgroup_disabled())
		return NULL;

	page = virt_to_head_page(p);

	/*
	 * If page->mem_cgroup is set, it's either a simple mem_cgroup pointer
	 * or a pointer to obj_cgroup vector. In the latter case the lowest
	 * bit of the pointer is set.
	 * The page->mem_cgroup pointer can be asynchronously changed
	 * from NULL to (obj_cgroup_vec | 0x1UL), but can't be changed
	 * from a valid memcg pointer to objcg vector or back.
	 */
	if (!page->mem_cgroup)
		return NULL;

2955
	/*
2956 2957 2958
	 * Slab objects are accounted individually, not per-page.
	 * Memcg membership data for each individual object is saved in
	 * the page->obj_cgroups.
2959
	 */
	if (page_has_obj_cgroups(page)) {
		struct obj_cgroup *objcg;
		unsigned int off;

		off = obj_to_index(page->slab_cache, page, p);
		objcg = page_obj_cgroups(page)[off];
		if (objcg)
			return obj_cgroup_memcg(objcg);

		return NULL;
2970
	}

	/* All other pages use page->mem_cgroup */
	return page->mem_cgroup;
}

__always_inline struct obj_cgroup *get_obj_cgroup_from_current(void)
{
	struct obj_cgroup *objcg = NULL;
	struct mem_cgroup *memcg;

2981 2982 2983
	if (memcg_kmem_bypass())
		return NULL;

	rcu_read_lock();
2985 2986
	if (unlikely(active_memcg()))
		memcg = active_memcg();
	else
		memcg = mem_cgroup_from_task(current);

	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
		objcg = rcu_dereference(memcg->objcg);
		if (objcg && obj_cgroup_tryget(objcg))
			break;
	}
	rcu_read_unlock();

	return objcg;
}

3000
static int memcg_alloc_cache_id(void)
3001
{
3002 3003 3004
	int id, size;
	int err;

3005
	id = ida_simple_get(&memcg_cache_ida,
3006 3007 3008
			    0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
	if (id < 0)
		return id;
3009

3010
	if (id < memcg_nr_cache_ids)
		return id;

	/*
	 * There's no space for the new id in memcg_caches arrays,
	 * so we have to grow them.
	 */
3017
	down_write(&memcg_cache_ids_sem);
3018 3019

	size = 2 * (id + 1);
	if (size < MEMCG_CACHES_MIN_SIZE)
		size = MEMCG_CACHES_MIN_SIZE;
	else if (size > MEMCG_CACHES_MAX_SIZE)
		size = MEMCG_CACHES_MAX_SIZE;

3025
	err = memcg_update_all_list_lrus(size);
	if (!err)
		memcg_nr_cache_ids = size;

	up_write(&memcg_cache_ids_sem);

3031
	if (err) {
3032
		ida_simple_remove(&memcg_cache_ida, id);
		return err;
	}
	return id;
}

static void memcg_free_cache_id(int id)
{
3040
	ida_simple_remove(&memcg_cache_ida, id);
3041 3042
}

3043
/**
3044
 * __memcg_kmem_charge: charge a number of kernel pages to a memcg
3045
 * @memcg: memory cgroup to charge
3046
 * @gfp: reclaim mode
3047
 * @nr_pages: number of pages to charge
 *
 * Returns 0 on success, an error code on failure.
 */
3051 3052
int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
			unsigned int nr_pages)
3053
{
3054
	struct page_counter *counter;
3055 3056
	int ret;

3057
	ret = try_charge(memcg, gfp, nr_pages);
3058
	if (ret)
3059
		return ret;
3060 3061 3062

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
	    !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {

		/*
		 * Enforce __GFP_NOFAIL allocation because callers are not
		 * prepared to see failures and likely do not have any failure
		 * handling code.
		 */
		if (gfp & __GFP_NOFAIL) {
			page_counter_charge(&memcg->kmem, nr_pages);
			return 0;
		}
3073 3074
		cancel_charge(memcg, nr_pages);
		return -ENOMEM;
3075
	}
3076
	return 0;
3077 3078
}

/**
 * __memcg_kmem_uncharge: uncharge a number of kernel pages from a memcg
 * @memcg: memcg to uncharge
 * @nr_pages: number of pages to uncharge
 */
void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		page_counter_uncharge(&memcg->kmem, nr_pages);

	page_counter_uncharge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_uncharge(&memcg->memsw, nr_pages);
}

3094
/**
3095
 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
 * @page: page to charge
 * @gfp: reclaim mode
 * @order: allocation order
 *
 * Returns 0 on success, an error code on failure.
 */
3102
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
3103
{
3104
	struct mem_cgroup *memcg;
3105
	int ret = 0;
3106

3107
	memcg = get_mem_cgroup_from_current();
3108
	if (memcg && !mem_cgroup_is_root(memcg)) {
3109
		ret = __memcg_kmem_charge(memcg, gfp, 1 << order);
3110 3111
		if (!ret) {
			page->mem_cgroup = memcg;
3112
			__SetPageKmemcg(page);
3113
			return 0;
3114
		}
3115
		css_put(&memcg->css);
3116
	}
3117
	return ret;
3118
}
3119

3120
/**
3121
 * __memcg_kmem_uncharge_page: uncharge a kmem page
 * @page: page to uncharge
 * @order: allocation order
 */
3125
void __memcg_kmem_uncharge_page(struct page *page, int order)
3126
{
3127
	struct mem_cgroup *memcg = page->mem_cgroup;
3128
	unsigned int nr_pages = 1 << order;

	if (!memcg)
		return;

3133
	VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
3134
	__memcg_kmem_uncharge(memcg, nr_pages);
3135
	page->mem_cgroup = NULL;
3136
	css_put(&memcg->css);

	/* slab pages do not have PageKmemcg flag set */
	if (PageKmemcg(page))
		__ClearPageKmemcg(page);
}
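/*
 * Editor's usage sketch (assumption): the page allocator only reaches
 * these helpers for accounted allocations, conceptually:
 *
 *	if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT))
 *		__memcg_kmem_charge_page(page, gfp, order);
 *	...
 *	if (memcg_kmem_enabled() && PageKmemcg(page))
 *		__memcg_kmem_uncharge_page(page, order);
 *
 * the wrappers that apply those checks live in the memcontrol header.
 */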

static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;
	bool ret = false;

	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
	if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
		stock->nr_bytes -= nr_bytes;
		ret = true;
	}

	local_irq_restore(flags);

	return ret;
}

static void drain_obj_stock(struct memcg_stock_pcp *stock)
{
	struct obj_cgroup *old = stock->cached_objcg;

	if (!old)
		return;

	if (stock->nr_bytes) {
		unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
		unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);

		if (nr_pages) {
			rcu_read_lock();
			__memcg_kmem_uncharge(obj_cgroup_memcg(old), nr_pages);
			rcu_read_unlock();
		}

		/*
		 * The leftover is flushed to the centralized per-memcg value.
		 * On the next attempt to refill obj stock it will be moved
		 * to a per-cpu stock (probably, on another CPU), see
		 * refill_obj_stock().
		 *
		 * How often it's flushed is a trade-off between the memory
		 * limit enforcement accuracy and potential CPU contention,
		 * so it might be changed in the future.
		 */
		atomic_add(nr_bytes, &old->nr_charged_bytes);
		stock->nr_bytes = 0;
	}

	obj_cgroup_put(old);
	stock->cached_objcg = NULL;
}

static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
				     struct mem_cgroup *root_memcg)
{
	struct mem_cgroup *memcg;

	if (stock->cached_objcg) {
		memcg = obj_cgroup_memcg(stock->cached_objcg);
		if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
			return true;
	}

	return false;
}

static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;

	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
	if (stock->cached_objcg != objcg) { /* reset if necessary */
		drain_obj_stock(stock);
		obj_cgroup_get(objcg);
		stock->cached_objcg = objcg;
		stock->nr_bytes = atomic_xchg(&objcg->nr_charged_bytes, 0);
	}
	stock->nr_bytes += nr_bytes;

	if (stock->nr_bytes > PAGE_SIZE)
		drain_obj_stock(stock);

	local_irq_restore(flags);
}

int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
{
	struct mem_cgroup *memcg;
	unsigned int nr_pages, nr_bytes;
	int ret;

	if (consume_obj_stock(objcg, size))
		return 0;

	/*
	 * In theory, objcg->nr_charged_bytes can have enough
	 * pre-charged bytes to satisfy the allocation. However,
	 * flushing objcg->nr_charged_bytes requires two atomic
	 * operations, and objcg->nr_charged_bytes can't be big,
	 * so it's better to ignore it and try to grab some new pages.
	 * objcg->nr_charged_bytes will be flushed in
	 * refill_obj_stock(), called from this function or
	 * independently later.
	 */
	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	css_get(&memcg->css);
	rcu_read_unlock();

	nr_pages = size >> PAGE_SHIFT;
	nr_bytes = size & (PAGE_SIZE - 1);

	if (nr_bytes)
		nr_pages += 1;

	ret = __memcg_kmem_charge(memcg, gfp, nr_pages);
	if (!ret && nr_bytes)
		refill_obj_stock(objcg, PAGE_SIZE - nr_bytes);

	css_put(&memcg->css);
	return ret;
}

void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
{
	refill_obj_stock(objcg, size);
}
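/*
 * Editor's sketch of the slab-side usage (hedged): per-object accounting
 * charges bytes rather than pages, roughly:
 *
 *	objcg = get_obj_cgroup_from_current();
 *	if (objcg && !obj_cgroup_charge(objcg, flags, obj_size))
 *		... stash objcg in the slab page's obj_cgroups vector ...
 *	// and on free:
 *	obj_cgroup_uncharge(objcg, obj_size);
 *
 * with sub-page remainders parked in the per-cpu obj stock above.
 */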

#endif /* CONFIG_MEMCG_KMEM */
3277

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * Because tail pages are not marked as "used", set it. We're under
3282
 * pgdat->lru_lock and migration entries are set up in all page mappings.
3283
 */
3284
void mem_cgroup_split_huge_fixup(struct page *head)
3285
{
3286
	struct mem_cgroup *memcg = head->mem_cgroup;
3287
	int i;
3288

3289 3290
	if (mem_cgroup_disabled())
		return;
3291

3292 3293 3294 3295
	for (i = 1; i < HPAGE_PMD_NR; i++) {
		css_get(&memcg->css);
		head[i].mem_cgroup = memcg;
	}
3296
}
3297
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
3298

#ifdef CONFIG_MEMCG_SWAP
/**
 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
 * @entry: swap entry to be moved
 * @from:  mem_cgroup which the entry is moved from
 * @to:  mem_cgroup which the entry is moved to
 *
 * It succeeds only when the swap_cgroup's record for this entry is the same
 * as the mem_cgroup's id of @from.
 *
 * Returns 0 on success, -EINVAL on failure.
 *
3311
 * The caller must have charged to @to, IOW, called page_counter_charge() about
 * both res and memsw, and called css_get().
 */
static int mem_cgroup_move_swap_account(swp_entry_t entry,
3315
				struct mem_cgroup *from, struct mem_cgroup *to)
3316 3317 3318
{
	unsigned short old_id, new_id;

	old_id = mem_cgroup_id(from);
	new_id = mem_cgroup_id(to);
3321 3322

	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3323 3324
		mod_memcg_state(from, MEMCG_SWAP, -1);
		mod_memcg_state(to, MEMCG_SWAP, 1);
		return 0;
	}
	return -EINVAL;
}
#else
static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3331
				struct mem_cgroup *from, struct mem_cgroup *to)
3332 3333 3334
{
	return -EINVAL;
}
3335
#endif

3337
static DEFINE_MUTEX(memcg_max_mutex);
3338

3339 3340
static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
				 unsigned long max, bool memsw)
3341
{
3342
	bool enlarge = false;
3343
	bool drained = false;
3344
	int ret;
3345 3346
	bool limits_invariant;
	struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
3347

3348
	do {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
3353

3354
		mutex_lock(&memcg_max_mutex);
3355 3356
		/*
		 * Make sure that the new limit (memsw or memory limit) doesn't
3357
		 * break our basic invariant rule memory.max <= memsw.max.
3358
		 */
3359
		limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
3360
					   max <= memcg->memsw.max;
3361
		if (!limits_invariant) {
3362
			mutex_unlock(&memcg_max_mutex);
3363 3364 3365
			ret = -EINVAL;
			break;
		}
3366
		if (max > counter->max)
3367
			enlarge = true;
3368 3369
		ret = page_counter_set_max(counter, max);
		mutex_unlock(&memcg_max_mutex);
3370 3371 3372 3373

		if (!ret)
			break;

		if (!drained) {
			drain_all_stock(memcg);
			drained = true;
			continue;
		}

		if (!try_to_free_mem_cgroup_pages(memcg, 1,
					GFP_KERNEL, !memsw)) {
			ret = -EBUSY;
			break;
		}
	} while (true);
3386

3387 3388
	if (!ret && enlarge)
		memcg_oom_recover(memcg);
3389

3390 3391 3392
	return ret;
}

3393
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
3394 3395 3396 3397
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	unsigned long nr_reclaimed = 0;
3398
	struct mem_cgroup_per_node *mz, *next_mz = NULL;
3399 3400
	unsigned long reclaimed;
	int loop = 0;
3401
	struct mem_cgroup_tree_per_node *mctz;
3402
	unsigned long excess;
	unsigned long nr_scanned;

	if (order > 0)
		return 0;

3408
	mctz = soft_limit_tree_node(pgdat->node_id);

	/*
	 * Do not even bother to check the largest node if the root
	 * is empty. Do it lockless to prevent lock bouncing. Races
	 * are acceptable as soft limit is best effort anyway.
	 */
3415
	if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
3416 3417
		return 0;

	/*
	 * This loop can run for a while, especially if mem_cgroups continuously
	 * keep exceeding their soft limit and putting the system under
	 * pressure
	 */
	do {
		if (next_mz)
			mz = next_mz;
		else
			mz = mem_cgroup_largest_soft_limit_node(mctz);
		if (!mz)
			break;

		nr_scanned = 0;
3432
		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
3433 3434 3435
						    gfp_mask, &nr_scanned);
		nr_reclaimed += reclaimed;
		*total_scanned += nr_scanned;
3436
		spin_lock_irq(&mctz->lock);
3437
		__mem_cgroup_remove_exceeded(mz, mctz);
3438 3439 3440 3441 3442 3443

		/*
		 * If we failed to reclaim anything from this memory cgroup
		 * it is time to move on to the next cgroup
		 */
		next_mz = NULL;
3444 3445 3446
		if (!reclaimed)
			next_mz = __mem_cgroup_largest_soft_limit_node(mctz);

3447
		excess = soft_limit_excess(mz->memcg);
		/*
		 * One school of thought says that we should not add
		 * back the node to the tree if reclaim returns 0.
		 * But our reclaim could return 0, simply because, due
		 * to priority, we are exposing a smaller subset of
		 * memory to reclaim from. Consider this as a longer
		 * term TODO.
		 */
		/* If excess == 0, no tree ops */
3457
		__mem_cgroup_insert_exceeded(mz, mctz, excess);
3458
		spin_unlock_irq(&mctz->lock);
3459 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475
		css_put(&mz->memcg->css);
		loop++;
		/*
		 * Could not reclaim anything and there are no more
		 * mem cgroups to try or we seem to be looping without
		 * reclaiming anything.
		 */
		if (!nr_reclaimed &&
			(next_mz == NULL ||
			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
			break;
	} while (!nr_reclaimed);
	if (next_mz)
		css_put(&next_mz->memcg->css);
	return nr_reclaimed;
}

/*
 * Test whether @memcg has children, dead or alive.  Note that this
 * function doesn't care whether @memcg has use_hierarchy enabled and
 * returns %true if there are child csses according to the cgroup
3480
 * hierarchy.  Testing use_hierarchy is the caller's responsibility.
3481
 */
3482 3483
static inline bool memcg_has_children(struct mem_cgroup *memcg)
{
3484 3485 3486 3487 3488 3489
	bool ret;

	rcu_read_lock();
	ret = css_next_child(NULL, &memcg->css);
	rcu_read_unlock();
	return ret;
3490 3491
}

3492
/*
3493
 * Reclaims as many pages from the given memcg as possible.
3494 3495 3496 3497 3498
 *
 * Caller is responsible for holding css reference for memcg.
 */
static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
{
3499
	int nr_retries = MAX_RECLAIM_RETRIES;
3500

	/* we call try-to-free pages to make this cgroup empty */
	lru_add_drain_all();
3503 3504 3505

	drain_all_stock(memcg);

3506
	/* try to free all pages in this cgroup */
3507
	while (nr_retries && page_counter_read(&memcg->memory)) {
3508
		int progress;
3509

3510 3511 3512
		if (signal_pending(current))
			return -EINTR;

3513 3514
		progress = try_to_free_mem_cgroup_pages(memcg, 1,
							GFP_KERNEL, true);
3515
		if (!progress) {
3516
			nr_retries--;
3517
			/* maybe some writeback is necessary */
3518
			congestion_wait(BLK_RW_ASYNC, HZ/10);
3519
		}
3520 3521

	}
3522 3523

	return 0;
3524 3525
}

3526 3527 3528
static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
					    char *buf, size_t nbytes,
					    loff_t off)
3529
{
3530
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3531

3532 3533
	if (mem_cgroup_is_root(memcg))
		return -EINVAL;
3534
	return mem_cgroup_force_empty(memcg) ?: nbytes;
3535 3536
}

3537 3538
static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
				     struct cftype *cft)
3539
{
3540
	return mem_cgroup_from_css(css)->use_hierarchy;
3541 3542
}

3543 3544
static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
				      struct cftype *cft, u64 val)
3545 3546
{
	int retval = 0;
3547
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
3549

3550
	if (memcg->use_hierarchy == val)
3551
		return 0;
3552

3553
	/*
3554
	 * If parent's use_hierarchy is set, we can't make any modifications
3555 3556 3557 3558 3559 3560
	 * in the child subtrees. If it is unset, then the change can
	 * occur, provided the current cgroup has no children.
	 *
	 * For the root cgroup, parent_mem is NULL, we allow value to be
	 * set if there are no children.
	 */
3561
	if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
3562
				(val == 1 || val == 0)) {
3563
		if (!memcg_has_children(memcg))
3564
			memcg->use_hierarchy = val;
3565 3566 3567 3568
		else
			retval = -EBUSY;
	} else
		retval = -EINVAL;
3569

3570 3571 3572
	return retval;
}

3573
static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3574
{
3575
	unsigned long val;
3576

3577
	if (mem_cgroup_is_root(memcg)) {
3578
		val = memcg_page_state(memcg, NR_FILE_PAGES) +
3579
			memcg_page_state(memcg, NR_ANON_MAPPED);
3580 3581
		if (swap)
			val += memcg_page_state(memcg, MEMCG_SWAP);
3582
	} else {
3583
		if (!swap)
3584
			val = page_counter_read(&memcg->memory);
3585
		else
3586
			val = page_counter_read(&memcg->memsw);
3587
	}
3588
	return val;
3589 3590
}

3591 3592 3593 3594 3595 3596 3597
enum {
	RES_USAGE,
	RES_LIMIT,
	RES_MAX_USAGE,
	RES_FAILCNT,
	RES_SOFT_LIMIT,
};
3598

3599
static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
3600
			       struct cftype *cft)
{
3602
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3603
	struct page_counter *counter;
3604

3605
	switch (MEMFILE_TYPE(cft->private)) {
3606
	case _MEM:
3607 3608
		counter = &memcg->memory;
		break;
3609
	case _MEMSWAP:
3610 3611
		counter = &memcg->memsw;
		break;
3612
	case _KMEM:
3613
		counter = &memcg->kmem;
3614
		break;
	case _TCP:
3616
		counter = &memcg->tcpmem;
		break;
3618 3619 3620
	default:
		BUG();
	}
3621 3622 3623 3624

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_USAGE:
		if (counter == &memcg->memory)
3625
			return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
3626
		if (counter == &memcg->memsw)
3627
			return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
3628 3629
		return (u64)page_counter_read(counter) * PAGE_SIZE;
	case RES_LIMIT:
3630
		return (u64)counter->max * PAGE_SIZE;
3631 3632 3633 3634 3635 3636 3637 3638 3639
	case RES_MAX_USAGE:
		return (u64)counter->watermark * PAGE_SIZE;
	case RES_FAILCNT:
		return counter->failcnt;
	case RES_SOFT_LIMIT:
		return (u64)memcg->soft_limit * PAGE_SIZE;
	default:
		BUG();
	}
B
Balbir Singh 已提交
3640
}
3641

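/*
 * Fold the remaining per-cpu vmstat deltas of @memcg into the atomic
 * counters of the memcg and all of its ancestors, so nothing is lost
 * when the cgroup is freed.
 */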
static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg)
{
	unsigned long stat[MEMCG_NR_STAT] = {0};
	struct mem_cgroup *mi;
	int node, cpu, i;

	for_each_online_cpu(cpu)
		for (i = 0; i < MEMCG_NR_STAT; i++)
			stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu);

	for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
		for (i = 0; i < MEMCG_NR_STAT; i++)
			atomic_long_add(stat[i], &mi->vmstats[i]);

	for_each_node(node) {
		struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
		struct mem_cgroup_per_node *pi;

		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
			stat[i] = 0;

		for_each_online_cpu(cpu)
			for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
				stat[i] += per_cpu(
					pn->lruvec_stat_cpu->count[i], cpu);

		for (pi = pn; pi; pi = parent_nodeinfo(pi, node))
			for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
				atomic_long_add(stat[i], &pi->lruvec_stat[i]);
	}
}

static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg)
{
	unsigned long events[NR_VM_EVENT_ITEMS];
	struct mem_cgroup *mi;
	int cpu, i;

	for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
		events[i] = 0;

	for_each_online_cpu(cpu)
		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			events[i] += per_cpu(memcg->vmstats_percpu->events[i],
					     cpu);

	for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			atomic_long_add(events[i], &mi->vmevents[i]);
}

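/*
 * Enable kernel memory accounting for @memcg: reserve a kmemcg id and set
 * up the obj_cgroup that kernel allocations will be charged through.
 */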
#ifdef CONFIG_MEMCG_KMEM
static int memcg_online_kmem(struct mem_cgroup *memcg)
{
	struct obj_cgroup *objcg;
	int memcg_id;

	if (cgroup_memory_nokmem)
		return 0;

	BUG_ON(memcg->kmemcg_id >= 0);
	BUG_ON(memcg->kmem_state);

	memcg_id = memcg_alloc_cache_id();
	if (memcg_id < 0)
		return memcg_id;

	objcg = obj_cgroup_alloc();
	if (!objcg) {
		memcg_free_cache_id(memcg_id);
		return -ENOMEM;
	}
	objcg->memcg = memcg;
	rcu_assign_pointer(memcg->objcg, objcg);

	static_branch_enable(&memcg_kmem_enabled_key);

	/*
	 * A memory cgroup is considered kmem-online as soon as it gets
	 * kmemcg_id. Setting the id after enabling static branching will
	 * guarantee no one starts accounting before all call sites are
	 * patched.
	 */
	memcg->kmemcg_id = memcg_id;
	memcg->kmem_state = KMEM_ONLINE;

	return 0;
}

static void memcg_offline_kmem(struct mem_cgroup *memcg)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *parent, *child;
	int kmemcg_id;

	if (memcg->kmem_state != KMEM_ONLINE)
		return;

	memcg->kmem_state = KMEM_ALLOCATED;

	parent = parent_mem_cgroup(memcg);
	if (!parent)
		parent = root_mem_cgroup;

	memcg_reparent_objcgs(memcg, parent);

	kmemcg_id = memcg->kmemcg_id;
	BUG_ON(kmemcg_id < 0);

	/*
	 * Change kmemcg_id of this cgroup and all its descendants to the
	 * parent's id, and then move all entries from this cgroup's list_lrus
	 * to ones of the parent. After we have finished, all list_lrus
	 * corresponding to this cgroup are guaranteed to remain empty. The
	 * ordering is imposed by list_lru_node->lock taken by
	 * memcg_drain_all_list_lrus().
	 */
	rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
	css_for_each_descendant_pre(css, &memcg->css) {
		child = mem_cgroup_from_css(css);
		BUG_ON(child->kmemcg_id != kmemcg_id);
		child->kmemcg_id = parent->kmemcg_id;
		if (!memcg->use_hierarchy)
			break;
	}
	rcu_read_unlock();

	memcg_drain_all_list_lrus(kmemcg_id, parent);

	memcg_free_cache_id(kmemcg_id);
}

static void memcg_free_kmem(struct mem_cgroup *memcg)
{
	/* css_alloc() failed, offlining didn't happen */
	if (unlikely(memcg->kmem_state == KMEM_ONLINE))
		memcg_offline_kmem(memcg);
}
#else
static int memcg_online_kmem(struct mem_cgroup *memcg)
{
	return 0;
}
static void memcg_offline_kmem(struct mem_cgroup *memcg)
{
}
static void memcg_free_kmem(struct mem_cgroup *memcg)
{
}
#endif /* CONFIG_MEMCG_KMEM */

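/* Update the v1 kmem limit; serialized by memcg_max_mutex like the other limits. */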
static int memcg_update_kmem_max(struct mem_cgroup *memcg,
				 unsigned long max)
{
	int ret;

	mutex_lock(&memcg_max_mutex);
	ret = page_counter_set_max(&memcg->kmem, max);
	mutex_unlock(&memcg_max_mutex);
	return ret;
}

static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
{
	int ret;

	mutex_lock(&memcg_max_mutex);

	ret = page_counter_set_max(&memcg->tcpmem, max);
	if (ret)
		goto out;

	if (!memcg->tcpmem_active) {
		/*
		 * The active flag needs to be written after the static_key
		 * update. This is what guarantees that the socket activation
		 * function is the last one to run. See mem_cgroup_sk_alloc()
		 * for details, and note that we don't mark any socket as
		 * belonging to this memcg until that flag is up.
		 *
		 * We need to do this, because static_keys will span multiple
		 * sites, but we can't control their order. If we mark a socket
		 * as accounted, but the accounting functions are not patched in
		 * yet, we'll lose accounting.
		 *
		 * We never race with the readers in mem_cgroup_sk_alloc(),
		 * because when this value changes, the code to process it is
		 * not patched in yet.
		 */
		static_branch_inc(&memcg_sockets_enabled_key);
		memcg->tcpmem_active = true;
	}
out:
	mutex_unlock(&memcg_max_mutex);
	return ret;
}

/*
 * The user of this function is...
 * RES_LIMIT.
 */
static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long nr_pages;
	int ret;

	buf = strstrip(buf);
	ret = page_counter_memparse(buf, "-1", &nr_pages);
	if (ret)
		return ret;

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_LIMIT:
		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
			ret = -EINVAL;
			break;
		}
		switch (MEMFILE_TYPE(of_cft(of)->private)) {
		case _MEM:
			ret = mem_cgroup_resize_max(memcg, nr_pages, false);
			break;
		case _MEMSWAP:
			ret = mem_cgroup_resize_max(memcg, nr_pages, true);
			break;
		case _KMEM:
			pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. "
				     "Please report your usecase to linux-mm@kvack.org if you "
				     "depend on this functionality.\n");
			ret = memcg_update_kmem_max(memcg, nr_pages);
			break;
		case _TCP:
			ret = memcg_update_tcp_max(memcg, nr_pages);
			break;
		}
		break;
	case RES_SOFT_LIMIT:
		memcg->soft_limit = nr_pages;
		ret = 0;
		break;
	}
	return ret ?: nbytes;
}

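/* Reset a counter's watermark or failcnt for the v1 reset files. */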
static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
				size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	struct page_counter *counter;

	switch (MEMFILE_TYPE(of_cft(of)->private)) {
	case _MEM:
		counter = &memcg->memory;
		break;
	case _MEMSWAP:
		counter = &memcg->memsw;
		break;
	case _KMEM:
		counter = &memcg->kmem;
		break;
	case _TCP:
		counter = &memcg->tcpmem;
		break;
	default:
		BUG();
	}

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_MAX_USAGE:
		page_counter_reset_watermark(counter);
		break;
	case RES_FAILCNT:
		counter->failcnt = 0;
		break;
	default:
		BUG();
	}

	return nbytes;
}

static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
					struct cftype *cft)
{
	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
}

#ifdef CONFIG_MMU
static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
					struct cftype *cft, u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	if (val & ~MOVE_MASK)
		return -EINVAL;

	/*
	 * No kind of locking is needed in here, because ->can_attach() will
	 * check this value once in the beginning of the process, and then carry
	 * on with stale data. This means that changes to this value will only
	 * affect task migrations starting after the change.
	 */
	memcg->move_charge_at_immigrate = val;
	return 0;
}
#else
static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
					struct cftype *cft, u64 val)
{
	return -ENOSYS;
}
#endif

#ifdef CONFIG_NUMA

#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
#define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)

static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
				int nid, unsigned int lru_mask, bool tree)
{
	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
	unsigned long nr = 0;
	enum lru_list lru;

	VM_BUG_ON((unsigned)nid >= nr_node_ids);

	for_each_lru(lru) {
		if (!(BIT(lru) & lru_mask))
			continue;
		if (tree)
			nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
		else
			nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
	}
	return nr;
}

static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
					     unsigned int lru_mask,
					     bool tree)
{
	unsigned long nr = 0;
	enum lru_list lru;

	for_each_lru(lru) {
		if (!(BIT(lru) & lru_mask))
			continue;
		if (tree)
			nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
		else
			nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
	}
	return nr;
}

static int memcg_numa_stat_show(struct seq_file *m, void *v)
{
	struct numa_stat {
		const char *name;
		unsigned int lru_mask;
	};

	static const struct numa_stat stats[] = {
		{ "total", LRU_ALL },
		{ "file", LRU_ALL_FILE },
		{ "anon", LRU_ALL_ANON },
		{ "unevictable", BIT(LRU_UNEVICTABLE) },
	};
	const struct numa_stat *stat;
	int nid;
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);

	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
		seq_printf(m, "%s=%lu", stat->name,
			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
						   false));
		for_each_node_state(nid, N_MEMORY)
			seq_printf(m, " N%d=%lu", nid,
				   mem_cgroup_node_nr_lru_pages(memcg, nid,
							stat->lru_mask, false));
		seq_putc(m, '\n');
	}

	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {

		seq_printf(m, "hierarchical_%s=%lu", stat->name,
			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
						   true));
		for_each_node_state(nid, N_MEMORY)
			seq_printf(m, " N%d=%lu", nid,
				   mem_cgroup_node_nr_lru_pages(memcg, nid,
							stat->lru_mask, true));
		seq_putc(m, '\n');
	}

	return 0;
}
#endif /* CONFIG_NUMA */

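/* cgroup v1 memory.stat items; memcg1_stat_names must stay in sync. */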
static const unsigned int memcg1_stats[] = {
	NR_FILE_PAGES,
	NR_ANON_MAPPED,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	NR_ANON_THPS,
#endif
	NR_SHMEM,
	NR_FILE_MAPPED,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	MEMCG_SWAP,
};

static const char *const memcg1_stat_names[] = {
	"cache",
	"rss",
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	"rss_huge",
#endif
	"shmem",
	"mapped_file",
	"dirty",
	"writeback",
	"swap",
};

/* Universal VM events cgroup1 shows, original sort order */
static const unsigned int memcg1_events[] = {
	PGPGIN,
	PGPGOUT,
	PGFAULT,
	PGMAJFAULT,
};

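/* memory.stat for cgroup v1: local counters first, then hierarchical totals. */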
static int memcg_stat_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
	unsigned long memory, memsw;
	struct mem_cgroup *mi;
	unsigned int i;

	BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));

	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
		unsigned long nr;

		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
			continue;
		nr = memcg_page_state_local(memcg, memcg1_stats[i]);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		if (memcg1_stats[i] == NR_ANON_THPS)
			nr *= HPAGE_PMD_NR;
#endif
		seq_printf(m, "%s %lu\n", memcg1_stat_names[i], nr * PAGE_SIZE);
	}

	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
		seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]),
			   memcg_events_local(memcg, memcg1_events[i]));

	for (i = 0; i < NR_LRU_LISTS; i++)
		seq_printf(m, "%s %lu\n", lru_list_name(i),
			   memcg_page_state_local(memcg, NR_LRU_BASE + i) *
			   PAGE_SIZE);

	/* Hierarchical information */
	memory = memsw = PAGE_COUNTER_MAX;
	for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
		memory = min(memory, READ_ONCE(mi->memory.max));
		memsw = min(memsw, READ_ONCE(mi->memsw.max));
	}
	seq_printf(m, "hierarchical_memory_limit %llu\n",
		   (u64)memory * PAGE_SIZE);
	if (do_memsw_account())
		seq_printf(m, "hierarchical_memsw_limit %llu\n",
			   (u64)memsw * PAGE_SIZE);

	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
		unsigned long nr;

		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
			continue;
		nr = memcg_page_state(memcg, memcg1_stats[i]);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		if (memcg1_stats[i] == NR_ANON_THPS)
			nr *= HPAGE_PMD_NR;
#endif
		seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
						(u64)nr * PAGE_SIZE);
	}

	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
		seq_printf(m, "total_%s %llu\n",
			   vm_event_name(memcg1_events[i]),
			   (u64)memcg_events(memcg, memcg1_events[i]));

	for (i = 0; i < NR_LRU_LISTS; i++)
		seq_printf(m, "total_%s %llu\n", lru_list_name(i),
			   (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
			   PAGE_SIZE);

#ifdef CONFIG_DEBUG_VM
	{
		pg_data_t *pgdat;
		struct mem_cgroup_per_node *mz;
		unsigned long anon_cost = 0;
		unsigned long file_cost = 0;

		for_each_online_pgdat(pgdat) {
			mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);

			anon_cost += mz->lruvec.anon_cost;
			file_cost += mz->lruvec.file_cost;
		}
		seq_printf(m, "anon_cost %lu\n", anon_cost);
		seq_printf(m, "file_cost %lu\n", file_cost);
	}
#endif

	return 0;
}

static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	return mem_cgroup_swappiness(memcg);
}

static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	if (val > 100)
		return -EINVAL;

	if (css->parent)
		memcg->swappiness = val;
	else
		vm_swappiness = val;

	return 0;
}

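/*
 * Signal all eventfds whose thresholds were crossed since the last call and
 * advance current_threshold to the highest threshold at or below usage.
 */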
static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
{
	struct mem_cgroup_threshold_ary *t;
	unsigned long usage;
	int i;

	rcu_read_lock();
	if (!swap)
		t = rcu_dereference(memcg->thresholds.primary);
	else
		t = rcu_dereference(memcg->memsw_thresholds.primary);

	if (!t)
		goto unlock;

	usage = mem_cgroup_usage(memcg, swap);

	/*
	 * current_threshold points to threshold just below or equal to usage.
	 * If it's not true, a threshold was crossed after last
	 * call of __mem_cgroup_threshold().
	 */
	i = t->current_threshold;

	/*
	 * Iterate backward over array of thresholds starting from
	 * current_threshold and check if a threshold is crossed.
	 * If none of thresholds below usage is crossed, we read
	 * only one element of the array here.
	 */
	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
		eventfd_signal(t->entries[i].eventfd, 1);

	/* i = current_threshold + 1 */
	i++;

	/*
	 * Iterate forward over array of thresholds starting from
	 * current_threshold+1 and check if a threshold is crossed.
	 * If none of thresholds above usage is crossed, we read
	 * only one element of the array here.
	 */
	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
		eventfd_signal(t->entries[i].eventfd, 1);

	/* Update current_threshold */
	t->current_threshold = i - 1;
unlock:
	rcu_read_unlock();
}

static void mem_cgroup_threshold(struct mem_cgroup *memcg)
{
	while (memcg) {
		__mem_cgroup_threshold(memcg, false);
		if (do_memsw_account())
			__mem_cgroup_threshold(memcg, true);

		memcg = parent_mem_cgroup(memcg);
	}
}

static int compare_thresholds(const void *a, const void *b)
{
	const struct mem_cgroup_threshold *_a = a;
	const struct mem_cgroup_threshold *_b = b;

	if (_a->threshold > _b->threshold)
		return 1;

	if (_a->threshold < _b->threshold)
		return -1;

	return 0;
}

static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
{
	struct mem_cgroup_eventfd_list *ev;

	spin_lock(&memcg_oom_lock);

	list_for_each_entry(ev, &memcg->oom_notify, list)
		eventfd_signal(ev->eventfd, 1);

	spin_unlock(&memcg_oom_lock);
	return 0;
}

static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg)
		mem_cgroup_oom_notify_cb(iter);
}

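/*
 * Add a usage threshold event.  Thresholds live in a sorted, RCU-protected
 * array; registration builds a new array and swaps it in.
 */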
static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, const char *args, enum res_type type)
{
	struct mem_cgroup_thresholds *thresholds;
	struct mem_cgroup_threshold_ary *new;
	unsigned long threshold;
	unsigned long usage;
	int i, size, ret;

	ret = page_counter_memparse(args, "-1", &threshold);
	if (ret)
		return ret;

	mutex_lock(&memcg->thresholds_lock);

	if (type == _MEM) {
		thresholds = &memcg->thresholds;
		usage = mem_cgroup_usage(memcg, false);
	} else if (type == _MEMSWAP) {
		thresholds = &memcg->memsw_thresholds;
		usage = mem_cgroup_usage(memcg, true);
	} else
		BUG();

	/* Check if a threshold crossed before adding a new one */
	if (thresholds->primary)
		__mem_cgroup_threshold(memcg, type == _MEMSWAP);

	size = thresholds->primary ? thresholds->primary->size + 1 : 1;

	/* Allocate memory for new array of thresholds */
	new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
	if (!new) {
		ret = -ENOMEM;
		goto unlock;
	}
	new->size = size;

	/* Copy thresholds (if any) to new array */
	if (thresholds->primary)
		memcpy(new->entries, thresholds->primary->entries,
		       flex_array_size(new, entries, size - 1));

	/* Add new threshold */
	new->entries[size - 1].eventfd = eventfd;
	new->entries[size - 1].threshold = threshold;

	/* Sort thresholds. Registering of new threshold isn't time-critical */
	sort(new->entries, size, sizeof(*new->entries),
			compare_thresholds, NULL);

	/* Find current threshold */
	new->current_threshold = -1;
	for (i = 0; i < size; i++) {
		if (new->entries[i].threshold <= usage) {
			/*
			 * new->current_threshold will not be used until
			 * rcu_assign_pointer(), so it's safe to increment
			 * it here.
			 */
			++new->current_threshold;
		} else
			break;
	}

	/* Free old spare buffer and save old primary buffer as spare */
	kfree(thresholds->spare);
	thresholds->spare = thresholds->primary;

	rcu_assign_pointer(thresholds->primary, new);

	/* To be sure that nobody uses thresholds */
	synchronize_rcu();

unlock:
	mutex_unlock(&memcg->thresholds_lock);

	return ret;
}

static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, const char *args)
{
	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
}

static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, const char *args)
{
	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
}

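/*
 * Remove all threshold entries belonging to @eventfd.  The spare array is
 * reused so unregistration cannot fail on memory allocation.
 */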
static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, enum res_type type)
{
	struct mem_cgroup_thresholds *thresholds;
	struct mem_cgroup_threshold_ary *new;
	unsigned long usage;
	int i, j, size, entries;

	mutex_lock(&memcg->thresholds_lock);

	if (type == _MEM) {
		thresholds = &memcg->thresholds;
		usage = mem_cgroup_usage(memcg, false);
	} else if (type == _MEMSWAP) {
		thresholds = &memcg->memsw_thresholds;
		usage = mem_cgroup_usage(memcg, true);
	} else
		BUG();

	if (!thresholds->primary)
		goto unlock;

	/* Check if a threshold crossed before removing */
	__mem_cgroup_threshold(memcg, type == _MEMSWAP);

	/* Calculate new number of threshold */
	size = entries = 0;
	for (i = 0; i < thresholds->primary->size; i++) {
		if (thresholds->primary->entries[i].eventfd != eventfd)
			size++;
		else
			entries++;
	}

	new = thresholds->spare;

	/* If no items related to eventfd have been cleared, nothing to do */
	if (!entries)
		goto unlock;

	/* Set thresholds array to NULL if we don't have thresholds */
	if (!size) {
		kfree(new);
		new = NULL;
		goto swap_buffers;
	}

	new->size = size;

	/* Copy thresholds and find current threshold */
	new->current_threshold = -1;
	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
		if (thresholds->primary->entries[i].eventfd == eventfd)
			continue;

		new->entries[j] = thresholds->primary->entries[i];
		if (new->entries[j].threshold <= usage) {
			/*
			 * new->current_threshold will not be used
			 * until rcu_assign_pointer(), so it's safe to increment
			 * it here.
			 */
			++new->current_threshold;
		}
		j++;
	}

swap_buffers:
	/* Swap primary and spare array */
	thresholds->spare = thresholds->primary;

	rcu_assign_pointer(thresholds->primary, new);

	/* To be sure that nobody uses thresholds */
	synchronize_rcu();

	/* If all events are unregistered, free the spare array */
	if (!new) {
		kfree(thresholds->spare);
		thresholds->spare = NULL;
	}
unlock:
	mutex_unlock(&memcg->thresholds_lock);
}

static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd)
{
	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
}

static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd)
{
	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
}

static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, const char *args)
{
	struct mem_cgroup_eventfd_list *event;

	event = kmalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	spin_lock(&memcg_oom_lock);

	event->eventfd = eventfd;
	list_add(&event->list, &memcg->oom_notify);

	/* already in OOM ? */
	if (memcg->under_oom)
		eventfd_signal(eventfd, 1);
	spin_unlock(&memcg_oom_lock);

	return 0;
}

static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd)
{
	struct mem_cgroup_eventfd_list *ev, *tmp;

	spin_lock(&memcg_oom_lock);

	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
		if (ev->eventfd == eventfd) {
			list_del(&ev->list);
			kfree(ev);
		}
	}

	spin_unlock(&memcg_oom_lock);
}

static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);

	seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
	seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
	seq_printf(sf, "oom_kill %lu\n",
		   atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
	return 0;
}

static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
	struct cftype *cft, u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	/* cannot set to root cgroup and only 0 and 1 are allowed */
	if (!css->parent || !((val == 0) || (val == 1)))
		return -EINVAL;

	memcg->oom_kill_disable = val;
	if (!val)
		memcg_oom_recover(memcg);

	return 0;
}

#ifdef CONFIG_CGROUP_WRITEBACK

#include <trace/events/writeback.h>

static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
{
	return wb_domain_init(&memcg->cgwb_domain, gfp);
}

static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
{
	wb_domain_exit(&memcg->cgwb_domain);
}

static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
{
	wb_domain_size_changed(&memcg->cgwb_domain);
}

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);

	if (!memcg->css.parent)
		return NULL;

	return &memcg->cgwb_domain;
}

/*
 * idx can be of type enum memcg_stat_item or node_stat_item.
 * Keep in sync with memcg_exact_page().
 */
static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx)
{
	long x = atomic_long_read(&memcg->vmstats[idx]);
	int cpu;

	for_each_online_cpu(cpu)
		x += per_cpu_ptr(memcg->vmstats_percpu, cpu)->stat[idx];
	if (x < 0)
		x = 0;
	return x;
}

/**
 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
 * @wb: bdi_writeback in question
 * @pfilepages: out parameter for number of file pages
 * @pheadroom: out parameter for number of allocatable pages according to memcg
 * @pdirty: out parameter for number of dirty pages
 * @pwriteback: out parameter for number of pages under writeback
 *
 * Determine the numbers of file, headroom, dirty, and writeback pages in
 * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
 * is a bit more involved.
 *
 * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
 * headroom is calculated as the lowest headroom of itself and the
 * ancestors.  Note that this doesn't consider the actual amount of
 * available memory in the system.  The caller should further cap
 * *@pheadroom accordingly.
 */
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
	struct mem_cgroup *parent;

	*pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY);

	*pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK);
	*pfilepages = memcg_exact_page_state(memcg, NR_INACTIVE_FILE) +
			memcg_exact_page_state(memcg, NR_ACTIVE_FILE);
	*pheadroom = PAGE_COUNTER_MAX;

	while ((parent = parent_mem_cgroup(memcg))) {
		unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
					    READ_ONCE(memcg->memory.high));
		unsigned long used = page_counter_read(&memcg->memory);

		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
		memcg = parent;
	}
}

/*
 * Foreign dirty flushing
 *
 * There's an inherent mismatch between memcg and writeback.  The former
 * tracks ownership per-page while the latter per-inode.  This was a
 * deliberate design decision because honoring per-page ownership in the
 * writeback path is complicated, may lead to higher CPU and IO overheads
 * and deemed unnecessary given that write-sharing an inode across
 * different cgroups isn't a common use-case.
 *
 * Combined with inode majority-writer ownership switching, this works well
 * enough in most cases but there are some pathological cases.  For
 * example, let's say there are two cgroups A and B which keep writing to
 * different but confined parts of the same inode.  B owns the inode and
 * A's memory is limited far below B's.  A's dirty ratio can rise enough to
 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
 * triggering background writeback.  A will be slowed down without a way to
 * make writeback of the dirty pages happen.
 *
 * Conditions like the above can lead to a cgroup getting repeatedly and
 * severely throttled after making some progress after each
 * dirty_expire_interval while the underlying IO device is almost
 * completely idle.
 *
 * Solving this problem completely requires matching the ownership tracking
 * granularities between memcg and writeback in either direction.  However,
 * the more egregious behaviors can be avoided by simply remembering the
 * most recent foreign dirtying events and initiating remote flushes on
 * them when local writeback isn't enough to keep the memory clean enough.
 *
 * The following two functions implement such mechanism.  When a foreign
 * page - a page whose memcg and writeback ownerships don't match - is
 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
 * bdi_writeback on the page owning memcg.  When balance_dirty_pages()
 * decides that the memcg needs to sleep due to high dirty ratio, it calls
 * mem_cgroup_flush_foreign() which queues writeback on the recorded
 * foreign bdi_writebacks which haven't expired.  Both the numbers of
 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
 * limited to MEMCG_CGWB_FRN_CNT.
 *
 * The mechanism only remembers IDs and doesn't hold any object references.
 * As being wrong occasionally doesn't matter, updates and accesses to the
 * records are lockless and racy.
 */
void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
					     struct bdi_writeback *wb)
{
	struct mem_cgroup *memcg = page->mem_cgroup;
	struct memcg_cgwb_frn *frn;
	u64 now = get_jiffies_64();
	u64 oldest_at = now;
	int oldest = -1;
	int i;

	trace_track_foreign_dirty(page, wb);

	/*
	 * Pick the slot to use.  If there is already a slot for @wb, keep
	 * using it.  If not replace the oldest one which isn't being
	 * written out.
	 */
	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
		frn = &memcg->cgwb_frn[i];
		if (frn->bdi_id == wb->bdi->id &&
		    frn->memcg_id == wb->memcg_css->id)
			break;
		if (time_before64(frn->at, oldest_at) &&
		    atomic_read(&frn->done.cnt) == 1) {
			oldest = i;
			oldest_at = frn->at;
		}
	}

	if (i < MEMCG_CGWB_FRN_CNT) {
		/*
		 * Re-using an existing one.  Update timestamp lazily to
		 * avoid making the cacheline hot.  We want them to be
		 * reasonably up-to-date and significantly shorter than
		 * dirty_expire_interval as that's what expires the record.
		 * Use the shorter of 1s and dirty_expire_interval / 8.
		 */
		unsigned long update_intv =
			min_t(unsigned long, HZ,
			      msecs_to_jiffies(dirty_expire_interval * 10) / 8);

		if (time_before64(frn->at, now - update_intv))
			frn->at = now;
	} else if (oldest >= 0) {
		/* replace the oldest free one */
		frn = &memcg->cgwb_frn[oldest];
		frn->bdi_id = wb->bdi->id;
		frn->memcg_id = wb->memcg_css->id;
		frn->at = now;
	}
}

/* issue foreign writeback flushes for recorded foreign dirtying events */
void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
	unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
	u64 now = jiffies_64;
	int i;

	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
		struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];

		/*
		 * If the record is older than dirty_expire_interval,
		 * writeback on it has already started.  No need to kick it
		 * off again.  Also, don't start a new one if there's
		 * already one in flight.
		 */
		if (time_after64(frn->at, now - intv) &&
		    atomic_read(&frn->done.cnt) == 1) {
			frn->at = 0;
			trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
			cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, 0,
					       WB_REASON_FOREIGN_FLUSH,
					       &frn->done);
		}
	}
}

#else	/* CONFIG_CGROUP_WRITEBACK */

static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
{
	return 0;
}

static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
{
}

static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

/*
 * DO NOT USE IN NEW FILES.
 *
 * "cgroup.event_control" implementation.
 *
 * This is way over-engineered.  It tries to support fully configurable
 * events for each user.  Such level of flexibility is completely
 * unnecessary especially in the light of the planned unified hierarchy.
 *
 * Please deprecate this and replace with something simpler if at all
 * possible.
 */

/*
 * Unregister event and free resources.
 *
 * Gets called from workqueue.
 */
static void memcg_event_remove(struct work_struct *work)
{
	struct mem_cgroup_event *event =
		container_of(work, struct mem_cgroup_event, remove);
	struct mem_cgroup *memcg = event->memcg;

	remove_wait_queue(event->wqh, &event->wait);

	event->unregister_event(memcg, event->eventfd);

	/* Notify userspace the event is going away. */
	eventfd_signal(event->eventfd, 1);

	eventfd_ctx_put(event->eventfd);
	kfree(event);
	css_put(&memcg->css);
}

/*
 * Gets called on EPOLLHUP on eventfd when user closes it.
 *
 * Called with wqh->lock held and interrupts disabled.
 */
static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
			    int sync, void *key)
{
	struct mem_cgroup_event *event =
		container_of(wait, struct mem_cgroup_event, wait);
	struct mem_cgroup *memcg = event->memcg;
	__poll_t flags = key_to_poll(key);

	if (flags & EPOLLHUP) {
		/*
		 * If the event has been detached at cgroup removal, we
		 * can simply return knowing the other side will cleanup
		 * for us.
		 *
		 * We can't race against event freeing since the other
		 * side will require wqh->lock via remove_wait_queue(),
		 * which we hold.
		 */
		spin_lock(&memcg->event_list_lock);
		if (!list_empty(&event->list)) {
			list_del_init(&event->list);
			/*
			 * We are in atomic context, but cgroup_event_remove()
			 * may sleep, so we have to call it in workqueue.
			 */
			schedule_work(&event->remove);
		}
		spin_unlock(&memcg->event_list_lock);
	}

	return 0;
}

static void memcg_event_ptable_queue_proc(struct file *file,
		wait_queue_head_t *wqh, poll_table *pt)
{
	struct mem_cgroup_event *event =
		container_of(pt, struct mem_cgroup_event, pt);

	event->wqh = wqh;
	add_wait_queue(wqh, &event->wait);
}

/*
 * DO NOT USE IN NEW FILES.
 *
 * Parse input and register new cgroup event handler.
 *
 * Input must be in format '<event_fd> <control_fd> <args>'.
 * Interpretation of args is defined by control file implementation.
 */
static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
					 char *buf, size_t nbytes, loff_t off)
{
	struct cgroup_subsys_state *css = of_css(of);
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct mem_cgroup_event *event;
	struct cgroup_subsys_state *cfile_css;
	unsigned int efd, cfd;
	struct fd efile;
	struct fd cfile;
	const char *name;
	char *endp;
	int ret;

	buf = strstrip(buf);

	efd = simple_strtoul(buf, &endp, 10);
	if (*endp != ' ')
		return -EINVAL;
	buf = endp + 1;

	cfd = simple_strtoul(buf, &endp, 10);
	if ((*endp != ' ') && (*endp != '\0'))
		return -EINVAL;
	buf = endp + 1;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	event->memcg = memcg;
	INIT_LIST_HEAD(&event->list);
	init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
	init_waitqueue_func_entry(&event->wait, memcg_event_wake);
	INIT_WORK(&event->remove, memcg_event_remove);

	efile = fdget(efd);
	if (!efile.file) {
		ret = -EBADF;
		goto out_kfree;
	}

	event->eventfd = eventfd_ctx_fileget(efile.file);
	if (IS_ERR(event->eventfd)) {
		ret = PTR_ERR(event->eventfd);
		goto out_put_efile;
	}

	cfile = fdget(cfd);
	if (!cfile.file) {
		ret = -EBADF;
		goto out_put_eventfd;
	}

	/* the process needs read permission on control file */
	/* AV: shouldn't we check that it's been opened for read instead? */
	ret = inode_permission(file_inode(cfile.file), MAY_READ);
	if (ret < 0)
		goto out_put_cfile;

	/*
	 * Determine the event callbacks and set them in @event.  This used
	 * to be done via struct cftype but cgroup core no longer knows
	 * about these events.  The following is crude but the whole thing
	 * is for compatibility anyway.
	 *
	 * DO NOT ADD NEW FILES.
	 */
	name = cfile.file->f_path.dentry->d_name.name;

	if (!strcmp(name, "memory.usage_in_bytes")) {
		event->register_event = mem_cgroup_usage_register_event;
		event->unregister_event = mem_cgroup_usage_unregister_event;
	} else if (!strcmp(name, "memory.oom_control")) {
		event->register_event = mem_cgroup_oom_register_event;
		event->unregister_event = mem_cgroup_oom_unregister_event;
	} else if (!strcmp(name, "memory.pressure_level")) {
		event->register_event = vmpressure_register_event;
		event->unregister_event = vmpressure_unregister_event;
	} else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
		event->register_event = memsw_cgroup_usage_register_event;
		event->unregister_event = memsw_cgroup_usage_unregister_event;
	} else {
		ret = -EINVAL;
		goto out_put_cfile;
	}

	/*
	 * Verify @cfile should belong to @css.  Also, remaining events are
	 * automatically removed on cgroup destruction but the removal is
	 * asynchronous, so take an extra ref on @css.
	 */
	cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
					       &memory_cgrp_subsys);
	ret = -EINVAL;
	if (IS_ERR(cfile_css))
		goto out_put_cfile;
	if (cfile_css != css) {
		css_put(cfile_css);
		goto out_put_cfile;
	}

	ret = event->register_event(memcg, event->eventfd, buf);
	if (ret)
		goto out_put_css;

	vfs_poll(efile.file, &event->pt);

	spin_lock(&memcg->event_list_lock);
	list_add(&event->list, &memcg->event_list);
	spin_unlock(&memcg->event_list_lock);

	fdput(cfile);
	fdput(efile);

	return nbytes;

out_put_css:
	css_put(css);
out_put_cfile:
	fdput(cfile);
out_put_eventfd:
	eventfd_ctx_put(event->eventfd);
out_put_efile:
	fdput(efile);
out_kfree:
	kfree(event);

	return ret;
}

static struct cftype mem_cgroup_legacy_files[] = {
	{
		.name = "usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "soft_limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "failcnt",
		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "stat",
		.seq_show = memcg_stat_show,
	},
	{
		.name = "force_empty",
		.write = mem_cgroup_force_empty_write,
	},
	{
		.name = "use_hierarchy",
		.write_u64 = mem_cgroup_hierarchy_write,
		.read_u64 = mem_cgroup_hierarchy_read,
	},
	{
		.name = "cgroup.event_control",		/* XXX: for compat */
		.write = memcg_write_event_control,
		.flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
	},
	{
		.name = "swappiness",
		.read_u64 = mem_cgroup_swappiness_read,
		.write_u64 = mem_cgroup_swappiness_write,
	},
	{
		.name = "move_charge_at_immigrate",
		.read_u64 = mem_cgroup_move_charge_read,
		.write_u64 = mem_cgroup_move_charge_write,
	},
	{
		.name = "oom_control",
		.seq_show = mem_cgroup_oom_control_read,
		.write_u64 = mem_cgroup_oom_control_write,
		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
	},
	{
		.name = "pressure_level",
	},
#ifdef CONFIG_NUMA
	{
		.name = "numa_stat",
		.seq_show = memcg_numa_stat_show,
	},
#endif
	{
		.name = "kmem.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "kmem.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "kmem.failcnt",
		.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "kmem.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
#if defined(CONFIG_MEMCG_KMEM) && \
	(defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
	{
		.name = "kmem.slabinfo",
		.seq_show = memcg_slab_show,
	},
#endif
	{
		.name = "kmem.tcp.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "kmem.tcp.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "kmem.tcp.failcnt",
		.private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "kmem.tcp.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{ },	/* terminate */
};

/*
 * Private memory cgroup IDR
 *
 * Swap-out records and page cache shadow entries need to store memcg
 * references in constrained space, so we maintain an ID space that is
 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
 * memory-controlled cgroups to 64k.
 *
 * However, there usually are many references to the offline CSS after
 * the cgroup has been destroyed, such as page cache or reclaimable
 * slab objects, that don't need to hang on to the ID. We want to keep
 * those dead CSS from occupying IDs, or we might quickly exhaust the
 * relatively small ID space and prevent the creation of new cgroups
 * even when there are much fewer than 64k cgroups - possibly none.
 *
 * Maintain a private 16-bit ID space for memcg, and allow the ID to
 * be freed and recycled when it's no longer needed, which is usually
 * when the CSS is offlined.
 *
 * The only exception to that are records of swapped out tmpfs/shmem
 * pages that need to be attributed to live ancestors on swapin. But
 * those references are manageable from userspace.
 */

static DEFINE_IDR(mem_cgroup_idr);

static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
{
	if (memcg->id.id > 0) {
		idr_remove(&mem_cgroup_idr, memcg->id.id);
		memcg->id.id = 0;
	}
}

static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
						  unsigned int n)
{
	refcount_add(n, &memcg->id.ref);
}

static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
{
	if (refcount_sub_and_test(n, &memcg->id.ref)) {
		mem_cgroup_id_remove(memcg);

		/* Memcg ID pins CSS */
		css_put(&memcg->css);
	}
}

static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
{
	mem_cgroup_id_put_many(memcg, 1);
}

/**
 * mem_cgroup_from_id - look up a memcg from a memcg id
 * @id: the memcg id to look up
 *
 * Caller must hold rcu_read_lock().
 */
struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return idr_find(&mem_cgroup_idr, id);
}

static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
{
	struct mem_cgroup_per_node *pn;
	int tmp = node;
	/*
	 * This routine is called against possible nodes.
	 * But it's BUG to call kmalloc() against offline node.
	 *
	 * TODO: this routine can waste much memory for nodes which will
	 *       never be onlined. It's better to use memory hotplug callback
	 *       function.
	 */
	if (!node_state(node, N_NORMAL_MEMORY))
		tmp = -1;
	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
	if (!pn)
		return 1;

	pn->lruvec_stat_local = alloc_percpu_gfp(struct lruvec_stat,
						 GFP_KERNEL_ACCOUNT);
	if (!pn->lruvec_stat_local) {
		kfree(pn);
		return 1;
	}

	pn->lruvec_stat_cpu = alloc_percpu_gfp(struct lruvec_stat,
					       GFP_KERNEL_ACCOUNT);
	if (!pn->lruvec_stat_cpu) {
		free_percpu(pn->lruvec_stat_local);
		kfree(pn);
		return 1;
	}

	lruvec_init(&pn->lruvec);
	pn->usage_in_excess = 0;
	pn->on_tree = false;
	pn->memcg = memcg;

	memcg->nodeinfo[node] = pn;
	return 0;
}

static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
{
	struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];

	if (!pn)
		return;

	free_percpu(pn->lruvec_stat_cpu);
	free_percpu(pn->lruvec_stat_local);
	kfree(pn);
}

static void __mem_cgroup_free(struct mem_cgroup *memcg)
{
	int node;

	for_each_node(node)
		free_mem_cgroup_per_node_info(memcg, node);
	free_percpu(memcg->vmstats_percpu);
	free_percpu(memcg->vmstats_local);
	kfree(memcg);
}

static void mem_cgroup_free(struct mem_cgroup *memcg)
{
	memcg_wb_domain_exit(memcg);
	/*
	 * Flush percpu vmstats and vmevents to guarantee the value correctness
	 * on parent's and all ancestor levels.
	 */
	memcg_flush_percpu_vmstats(memcg);
	memcg_flush_percpu_vmevents(memcg);
	__mem_cgroup_free(memcg);
}

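/*
 * Allocate and initialize a mem_cgroup, including its private ID and all
 * per-node and per-cpu statistics structures.
 */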
static struct mem_cgroup *mem_cgroup_alloc(void)
{
	struct mem_cgroup *memcg;
	unsigned int size;
	int node;
	int __maybe_unused i;
	long error = -ENOMEM;

	size = sizeof(struct mem_cgroup);
	size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);

	memcg = kzalloc(size, GFP_KERNEL);
	if (!memcg)
		return ERR_PTR(error);

	memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
				 1, MEM_CGROUP_ID_MAX,
				 GFP_KERNEL);
	if (memcg->id.id < 0) {
		error = memcg->id.id;
		goto fail;
	}

	memcg->vmstats_local = alloc_percpu_gfp(struct memcg_vmstats_percpu,
						GFP_KERNEL_ACCOUNT);
	if (!memcg->vmstats_local)
		goto fail;

	memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
						 GFP_KERNEL_ACCOUNT);
	if (!memcg->vmstats_percpu)
		goto fail;

	for_each_node(node)
		if (alloc_mem_cgroup_per_node_info(memcg, node))
			goto fail;

	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
		goto fail;

	INIT_WORK(&memcg->high_work, high_work_func);
	INIT_LIST_HEAD(&memcg->oom_notify);
	mutex_init(&memcg->thresholds_lock);
	spin_lock_init(&memcg->move_lock);
	vmpressure_init(&memcg->vmpressure);
	INIT_LIST_HEAD(&memcg->event_list);
	spin_lock_init(&memcg->event_list_lock);
	memcg->socket_pressure = jiffies;
#ifdef CONFIG_MEMCG_KMEM
	memcg->kmemcg_id = -1;
	INIT_LIST_HEAD(&memcg->objcg_list);
#endif
#ifdef CONFIG_CGROUP_WRITEBACK
	INIT_LIST_HEAD(&memcg->cgwb_list);
	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
		memcg->cgwb_frn[i].done =
			__WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
	INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
	memcg->deferred_split_queue.split_queue_len = 0;
#endif
	idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
	return memcg;
fail:
	mem_cgroup_id_remove(memcg);
	__mem_cgroup_free(memcg);
	return ERR_PTR(error);
}

static struct cgroup_subsys_state * __ref
mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
	struct mem_cgroup *memcg, *old_memcg;
	long error = -ENOMEM;

	old_memcg = set_active_memcg(parent);
	memcg = mem_cgroup_alloc();
	set_active_memcg(old_memcg);
	if (IS_ERR(memcg))
		return ERR_CAST(memcg);

	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
	memcg->soft_limit = PAGE_COUNTER_MAX;
	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
	if (parent) {
		memcg->swappiness = mem_cgroup_swappiness(parent);
		memcg->oom_kill_disable = parent->oom_kill_disable;
	}
	if (!parent) {
		page_counter_init(&memcg->memory, NULL);
		page_counter_init(&memcg->swap, NULL);
		page_counter_init(&memcg->kmem, NULL);
		page_counter_init(&memcg->tcpmem, NULL);
	} else if (parent->use_hierarchy) {
		memcg->use_hierarchy = true;
		page_counter_init(&memcg->memory, &parent->memory);
		page_counter_init(&memcg->swap, &parent->swap);
		page_counter_init(&memcg->kmem, &parent->kmem);
		page_counter_init(&memcg->tcpmem, &parent->tcpmem);
	} else {
		page_counter_init(&memcg->memory, &root_mem_cgroup->memory);
		page_counter_init(&memcg->swap, &root_mem_cgroup->swap);
		page_counter_init(&memcg->kmem, &root_mem_cgroup->kmem);
		page_counter_init(&memcg->tcpmem, &root_mem_cgroup->tcpmem);
		/*
		 * Deeper hierarchy with use_hierarchy == false doesn't make
		 * much sense so let cgroup subsystem know about this
		 * unfortunate state in our controller.
		 */
		if (parent != root_mem_cgroup)
			memory_cgrp_subsys.broken_hierarchy = true;
	}

	/* The following stuff does not apply to the root */
	if (!parent) {
		root_mem_cgroup = memcg;
		return &memcg->css;
	}

	error = memcg_online_kmem(memcg);
	if (error)
		goto fail;

	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
		static_branch_inc(&memcg_sockets_enabled_key);

	return &memcg->css;
fail:
	mem_cgroup_id_remove(memcg);
	mem_cgroup_free(memcg);
	return ERR_PTR(error);
}

5402
static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
5403
{
5404 5405
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	/*
	 * A memcg must be visible for memcg_expand_shrinker_maps()
	 * by the time the maps are allocated. So, we allocate maps
	 * here, when for_each_mem_cgroup() can't skip it.
	 */
	if (memcg_alloc_shrinker_maps(memcg)) {
		mem_cgroup_id_remove(memcg);
		return -ENOMEM;
	}

5416
	/* Online state pins memcg ID, memcg ID pins CSS */
5417
	refcount_set(&memcg->id.ref, 1);
5418
	css_get(css);
5419
	return 0;
}

5422
static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
5423
{
5424
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5425
	struct mem_cgroup_event *event, *tmp;
5426 5427 5428 5429 5430 5431

	/*
	 * Unregister events and notify userspace.
	 * Notify userspace about cgroup removing only after rmdir of cgroup
	 * directory to avoid race between userspace and kernelspace.
	 */
5432 5433
	spin_lock(&memcg->event_list_lock);
	list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
5434 5435 5436
		list_del_init(&event->list);
		schedule_work(&event->remove);
	}
5437
	spin_unlock(&memcg->event_list_lock);
5438

	page_counter_set_min(&memcg->memory, 0);
5440
	page_counter_set_low(&memcg->memory, 0);
5441

5442
	memcg_offline_kmem(memcg);
5443
	wb_memcg_offline(memcg);
5444

5445 5446
	drain_all_stock(memcg);

5447
	mem_cgroup_id_put(memcg);
5448 5449
}

5450 5451 5452 5453 5454 5455 5456
static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	invalidate_reclaim_iterators(memcg);
}

5457
static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
{
5459
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5460
	int __maybe_unused i;
5461

5462 5463 5464 5465
#ifdef CONFIG_CGROUP_WRITEBACK
	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
		wb_wait_for_completion(&memcg->cgwb_frn[i].done);
#endif
5466
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5467
		static_branch_dec(&memcg_sockets_enabled_key);
5468

5469
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
		static_branch_dec(&memcg_sockets_enabled_key);
5471

5472 5473 5474
	vmpressure_cleanup(&memcg->vmpressure);
	cancel_work_sync(&memcg->high_work);
	mem_cgroup_remove_from_trees(memcg);
5475
	memcg_free_shrinker_maps(memcg);
5476
	memcg_free_kmem(memcg);
5477
	mem_cgroup_free(memcg);
}

/**
 * mem_cgroup_css_reset - reset the states of a mem_cgroup
 * @css: the target css
 *
 * Reset the states of the mem_cgroup associated with @css.  This is
 * invoked when the userland requests disabling on the default hierarchy
 * but the memcg is pinned through dependency.  The memcg should stop
 * applying policies and should revert to the vanilla state as it may be
 * made visible again.
 *
 * The current implementation only resets the essential configurations.
 * This needs to be expanded to cover all the visible parts.
 */
static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

5497 5498 5499 5500
	page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
	page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
	page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
	page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
	page_counter_set_min(&memcg->memory, 0);
5502
	page_counter_set_low(&memcg->memory, 0);
5503
	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5504
	memcg->soft_limit = PAGE_COUNTER_MAX;
5505
	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5506
	memcg_wb_domain_size_changed(memcg);
5507 5508
}

5509
#ifdef CONFIG_MMU
5510
/* Handlers for move charge at task migration. */
5511
static int mem_cgroup_do_precharge(unsigned long count)
5512
{
5513
	int ret;
5514

5515 5516
	/* Try a single bulk charge without reclaim first, kswapd may wake */
	ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
5517
	if (!ret) {
5518 5519 5520
		mc.precharge += count;
		return ret;
	}
5521

5522
	/* Try charges one by one with reclaim, but do not retry */
5523
	while (count--) {
5524
		ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
5525 5526
		if (ret)
			return ret;
5527
		mc.precharge++;
5528
		cond_resched();
5529
	}
5530
	return 0;
5531 5532 5533 5534
}

union mc_target {
	struct page	*page;
5535
	swp_entry_t	ent;
5536 5537 5538
};

enum mc_target_type {
5539
	MC_TARGET_NONE = 0,
5540
	MC_TARGET_PAGE,
5541
	MC_TARGET_SWAP,
5542
	MC_TARGET_DEVICE,
5543 5544
};

static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
						unsigned long addr, pte_t ptent)
5547
{
5548
	struct page *page = vm_normal_page(vma, addr, ptent);
5549

	if (!page || !page_mapped(page))
		return NULL;
	if (PageAnon(page)) {
5553
		if (!(mc.flags & MOVE_ANON))
			return NULL;
5555 5556 5557 5558
	} else {
		if (!(mc.flags & MOVE_FILE))
			return NULL;
	}
	if (!get_page_unless_zero(page))
		return NULL;

	return page;
}

5565
#if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5567
			pte_t ptent, swp_entry_t *entry)
{
	struct page *page = NULL;
	swp_entry_t ent = pte_to_swp_entry(ptent);

5572
	if (!(mc.flags & MOVE_ANON))
		return NULL;

	/*
	 * Handle MEMORY_DEVICE_PRIVATE, which are ZONE_DEVICE pages belonging to
	 * a device; because they are not accessible by the CPU they are stored
	 * as special swap entries in the CPU page table.
	 */
	if (is_device_private_entry(ent)) {
		page = device_private_entry_to_page(ent);
		/*
		 * MEMORY_DEVICE_PRIVATE means a ZONE_DEVICE page which has
		 * a refcount of 1 when free (unlike a normal page).
		 */
		if (!page_ref_add_unless(page, 1, 1))
			return NULL;
		return page;
	}

5591 5592 5593
	if (non_swap_entry(ent))
		return NULL;

5594 5595 5596 5597
	/*
	 * Because lookup_swap_cache() updates some statistics counters,
	 * we call find_get_page() with swapper_space directly.
	 */
5598
	page = find_get_page(swap_address_space(ent), swp_offset(ent));
5599
	entry->val = ent.val;

	return page;
}
5603 5604
#else
static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5605
			pte_t ptent, swp_entry_t *entry)
5606 5607 5608 5609
{
	return NULL;
}
#endif

5611 5612 5613 5614 5615
static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	if (!vma->vm_file) /* anonymous vma */
		return NULL;
5616
	if (!(mc.flags & MOVE_FILE))
5617 5618 5619
		return NULL;

	/* page is moved even if it's not RSS of this task (page-faulted). */
5620
	/* shmem/tmpfs may report page out on swap: account for that too. */
5621 5622
	return find_get_incore_page(vma->vm_file->f_mapping,
			linear_page_index(vma, addr));
5623 5624
}

5625 5626 5627
/**
 * mem_cgroup_move_account - move account of the page
 * @page: the page
5628
 * @compound: charge the page as compound or small page
5629 5630 5631
 * @from: mem_cgroup which the page is moved from.
 * @to:	mem_cgroup which the page is moved to. @from != @to.
 *
5632
 * The caller must make sure the page is not on the LRU (isolate_lru_page() is useful.)
5633 5634 5635 5636 5637
 *
 * This function doesn't do "charge" to the new cgroup and doesn't do "uncharge"
 * from the old cgroup.
 */
static int mem_cgroup_move_account(struct page *page,
5638
				   bool compound,
5639 5640 5641
				   struct mem_cgroup *from,
				   struct mem_cgroup *to)
{
5642 5643
	struct lruvec *from_vec, *to_vec;
	struct pglist_data *pgdat;
5644
	unsigned int nr_pages = compound ? thp_nr_pages(page) : 1;
5645 5646 5647 5648
	int ret;

	VM_BUG_ON(from == to);
	VM_BUG_ON_PAGE(PageLRU(page), page);
5649
	VM_BUG_ON(compound && !PageTransHuge(page));
5650 5651

	/*
5652
	 * Prevent mem_cgroup_migrate() from looking at
5653
	 * page->mem_cgroup of its source page while we change it.
5654
	 */
5655
	ret = -EBUSY;
5656 5657 5658 5659 5660 5661 5662
	if (!trylock_page(page))
		goto out;

	ret = -EINVAL;
	if (page->mem_cgroup != from)
		goto out_unlock;

5663
	pgdat = page_pgdat(page);
5664 5665
	from_vec = mem_cgroup_lruvec(from, pgdat);
	to_vec = mem_cgroup_lruvec(to, pgdat);
5666

5667
	lock_page_memcg(page);
5668

5669 5670 5671 5672
	if (PageAnon(page)) {
		if (page_mapped(page)) {
			__mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
			__mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
5673 5674 5675 5676 5677 5678 5679
			if (PageTransHuge(page)) {
				__mod_lruvec_state(from_vec, NR_ANON_THPS,
						   -nr_pages);
				__mod_lruvec_state(to_vec, NR_ANON_THPS,
						   nr_pages);
			}

5680 5681
		}
	} else {
5682 5683 5684 5685 5686 5687 5688 5689
		__mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
		__mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);

		if (PageSwapBacked(page)) {
			__mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
			__mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
		}

5690 5691 5692 5693
		if (page_mapped(page)) {
			__mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
			__mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
		}
5694

5695 5696
		if (PageDirty(page)) {
			struct address_space *mapping = page_mapping(page);
5697

5698
			if (mapping_can_writeback(mapping)) {
5699 5700 5701 5702 5703
				__mod_lruvec_state(from_vec, NR_FILE_DIRTY,
						   -nr_pages);
				__mod_lruvec_state(to_vec, NR_FILE_DIRTY,
						   nr_pages);
			}
5704 5705 5706
		}
	}

5707
	if (PageWriteback(page)) {
5708 5709
		__mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
		__mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
5710 5711 5712
	}

	/*
5713 5714
	 * All state has been migrated, let's switch to the new memcg.
	 *
5715
	 * It is safe to change page->mem_cgroup here because the page
5716 5717 5718 5719 5720 5721 5722 5723
	 * is referenced, charged, isolated, and locked: we can't race
	 * with (un)charging, migration, LRU putback, or anything else
	 * that would rely on a stable page->mem_cgroup.
	 *
	 * Note that lock_page_memcg is a memcg lock, not a page lock,
	 * to save space. As soon as we switch page->mem_cgroup to a
	 * new memcg that isn't locked, the above state can change
	 * concurrently again. Make sure we're truly done with it.
5724
	 */
5725
	smp_mb();
5726

5727 5728 5729 5730
	css_get(&to->css);
	css_put(&from->css);

	page->mem_cgroup = to;
5731

5732
	__unlock_page_memcg(from);
5733 5734 5735 5736

	ret = 0;

	local_irq_disable();
5737
	mem_cgroup_charge_statistics(to, page, nr_pages);
5738
	memcg_check_events(to, page);
5739
	mem_cgroup_charge_statistics(from, page, -nr_pages);
5740 5741 5742 5743 5744 5745 5746 5747
	memcg_check_events(from, page);
	local_irq_enable();
out_unlock:
	unlock_page(page);
out:
	return ret;
}

/**
 * get_mctgt_type - get target type of moving charge
 * @vma: the vma the pte to be checked belongs
 * @addr: the address corresponding to the pte to be checked
 * @ptent: the pte to be checked
 * @target: the pointer the target page or swap ent will be stored(can be NULL)
 *
 * Returns
 *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
 *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
 *     move charge. if @target is not NULL, the page is stored in target->page
 *     with extra refcnt got(Callers should handle it).
 *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
 *     target for charge migration. if @target is not NULL, the entry is stored
 *     in target->ent.
 *   3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but page is MEMORY_DEVICE_PRIVATE
 *     (so a ZONE_DEVICE page and thus not on the lru).
 *     For now such a page is charged like a regular page would be, as for all
 *     intents and purposes it is just special memory taking the place of a
 *     regular page.
 *
 *     See Documentation/vm/hmm.rst and include/linux/hmm.h
 *
 * Called with pte lock held.
 */

5774
static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
		unsigned long addr, pte_t ptent, union mc_target *target)
{
	struct page *page = NULL;
5778
	enum mc_target_type ret = MC_TARGET_NONE;
	swp_entry_t ent = { .val = 0 };

	if (pte_present(ptent))
		page = mc_handle_present_pte(vma, addr, ptent);
	else if (is_swap_pte(ptent))
5784
		page = mc_handle_swap_pte(vma, ptent, &ent);
5785
	else if (pte_none(ptent))
5786
		page = mc_handle_file_pte(vma, addr, ptent, &ent);

	if (!page && !ent.val)
5789
		return ret;
5790 5791
	if (page) {
		/*
		 * Do only a loose check w/o serialization.
		 * mem_cgroup_move_account() checks the page is valid or
		 * not under LRU exclusion.
		 */
5796
		if (page->mem_cgroup == mc.from) {
5797
			ret = MC_TARGET_PAGE;
5798
			if (is_device_private_page(page))
5799
				ret = MC_TARGET_DEVICE;
5800 5801 5802 5803 5804 5805
			if (target)
				target->page = page;
		}
		if (!ret || !target)
			put_page(page);
	}
5806 5807 5808 5809 5810
	/*
	 * There is a swap entry and a page doesn't exist or isn't charged.
	 * But we cannot move a tail-page in a THP.
	 */
	if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
	    mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
5812 5813 5814
		ret = MC_TARGET_SWAP;
		if (target)
			target->ent = ent;
5815 5816 5817 5818
	}
	return ret;
}

5819 5820
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
5821 5822
 * We don't consider PMD mapped swapping or file mapped pages because THP does
 * not support them for now.
5823 5824 5825 5826 5827 5828 5829 5830
 * Caller should make sure that pmd_trans_huge(pmd) is true.
 */
static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
		unsigned long addr, pmd_t pmd, union mc_target *target)
{
	struct page *page = NULL;
	enum mc_target_type ret = MC_TARGET_NONE;

5831 5832 5833 5834 5835
	if (unlikely(is_swap_pmd(pmd))) {
		VM_BUG_ON(thp_migration_supported() &&
				  !is_pmd_migration_entry(pmd));
		return ret;
	}
5836
	page = pmd_page(pmd);
5837
	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
5838
	if (!(mc.flags & MOVE_ANON))
5839
		return ret;
5840
	if (page->mem_cgroup == mc.from) {
		ret = MC_TARGET_PAGE;
		if (target) {
			get_page(page);
			target->page = page;
		}
	}
	return ret;
}
#else
static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
		unsigned long addr, pmd_t pmd, union mc_target *target)
{
	return MC_TARGET_NONE;
}
#endif

5857 5858 5859 5860
static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
					unsigned long addr, unsigned long end,
					struct mm_walk *walk)
{
5861
	struct vm_area_struct *vma = walk->vma;
5862 5863 5864
	pte_t *pte;
	spinlock_t *ptl;

5865 5866
	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
5867 5868
		/*
		 * Note there cannot be MC_TARGET_DEVICE for now as we do not
		 * support transparent huge pages with MEMORY_DEVICE_PRIVATE but
		 * this might change.
5871
		 */
5872 5873
		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
			mc.precharge += HPAGE_PMD_NR;
5874
		spin_unlock(ptl);
5875
		return 0;
5876
	}
5877

5878 5879
	if (pmd_trans_unstable(pmd))
		return 0;
5880 5881
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
5882
		if (get_mctgt_type(vma, addr, *pte, NULL))
5883 5884 5885 5886
			mc.precharge++;	/* increment precharge temporarily */
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

5887 5888 5889
	return 0;
}

5890 5891 5892 5893
static const struct mm_walk_ops precharge_walk_ops = {
	.pmd_entry	= mem_cgroup_count_precharge_pte_range,
};

5894 5895 5896 5897
static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
{
	unsigned long precharge;

5898
	mmap_read_lock(mm);
5899
	walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL);
5900
	mmap_read_unlock(mm);

	precharge = mc.precharge;
	mc.precharge = 0;

	return precharge;
}

static int mem_cgroup_precharge_mc(struct mm_struct *mm)
{
5910 5911 5912 5913 5914
	unsigned long precharge = mem_cgroup_count_precharge(mm);

	VM_BUG_ON(mc.moving_task);
	mc.moving_task = current;
	return mem_cgroup_do_precharge(precharge);
5915 5916
}

5917 5918
/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
static void __mem_cgroup_clear_mc(void)
5919
{
5920 5921 5922
	struct mem_cgroup *from = mc.from;
	struct mem_cgroup *to = mc.to;

5923
	/* we must uncharge all the leftover precharges from mc.to */
5924
	if (mc.precharge) {
5925
		cancel_charge(mc.to, mc.precharge);
5926 5927 5928 5929 5930 5931 5932
		mc.precharge = 0;
	}
	/*
	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
	 * we must uncharge here.
	 */
	if (mc.moved_charge) {
5933
		cancel_charge(mc.from, mc.moved_charge);
5934
		mc.moved_charge = 0;
5935
	}
5936 5937 5938
	/* we must fixup refcnts and charges */
	if (mc.moved_swap) {
		/* uncharge swap account from the old cgroup */
5939
		if (!mem_cgroup_is_root(mc.from))
5940
			page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
5941

5942 5943
		mem_cgroup_id_put_many(mc.from, mc.moved_swap);

5944
		/*
5945 5946
		 * we charged both to->memory and to->memsw, so we
		 * should uncharge to->memory.
5947
		 */
5948
		if (!mem_cgroup_is_root(mc.to))
5949 5950
			page_counter_uncharge(&mc.to->memory, mc.moved_swap);

5951 5952
		mc.moved_swap = 0;
	}
5953 5954 5955 5956 5957 5958 5959
	memcg_oom_recover(from);
	memcg_oom_recover(to);
	wake_up_all(&mc.waitq);
}

static void mem_cgroup_clear_mc(void)
{
5960 5961
	struct mm_struct *mm = mc.mm;

5962 5963 5964 5965 5966 5967
	/*
	 * we must clear moving_task before waking up waiters at the end of
	 * task migration.
	 */
	mc.moving_task = NULL;
	__mem_cgroup_clear_mc();
5968
	spin_lock(&mc.lock);
5969 5970
	mc.from = NULL;
	mc.to = NULL;
5971
	mc.mm = NULL;
5972
	spin_unlock(&mc.lock);
5973 5974

	mmput(mm);
5975 5976
}

5977
static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5978
{
5979
	struct cgroup_subsys_state *css;
5980
	struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
5981
	struct mem_cgroup *from;
5982
	struct task_struct *leader, *p;
5983
	struct mm_struct *mm;
5984
	unsigned long move_flags;
5985
	int ret = 0;
5986

5987 5988
	/* charge immigration isn't supported on the default hierarchy */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5989 5990
		return 0;

5991 5992 5993 5994 5995 5996 5997
	/*
	 * Multi-process migrations only happen on the default hierarchy
	 * where charge immigration is not used.  Perform charge
	 * immigration if @tset contains a leader and whine if there are
	 * multiple.
	 */
	p = NULL;
5998
	cgroup_taskset_for_each_leader(leader, css, tset) {
5999 6000
		WARN_ON_ONCE(p);
		p = leader;
6001
		memcg = mem_cgroup_from_css(css);
6002 6003 6004 6005
	}
	if (!p)
		return 0;

	/*
	 * We are now committed to this value whatever it is. Changes in this
	 * tunable will only affect upcoming migrations, not the current one.
	 * So we need to save it, and keep it going.
	 */
	move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
	if (!move_flags)
		return 0;

	from = mem_cgroup_from_task(p);

	VM_BUG_ON(from == memcg);

	mm = get_task_mm(p);
	if (!mm)
		return 0;
	/* We move charges only when we move an owner of the mm */
	if (mm->owner == p) {
		VM_BUG_ON(mc.from);
		VM_BUG_ON(mc.to);
		VM_BUG_ON(mc.precharge);
		VM_BUG_ON(mc.moved_charge);
		VM_BUG_ON(mc.moved_swap);

		spin_lock(&mc.lock);
6031
		mc.mm = mm;
		mc.from = from;
		mc.to = memcg;
		mc.flags = move_flags;
		spin_unlock(&mc.lock);
		/* We set mc.moving_task later */

		ret = mem_cgroup_precharge_mc(mm);
		if (ret)
			mem_cgroup_clear_mc();
6041 6042
	} else {
		mmput(mm);
6043 6044 6045 6046
	}
	return ret;
}

6047
static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6048
{
6049 6050
	if (mc.to)
		mem_cgroup_clear_mc();
6051 6052
}

6053 6054 6055
static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mm_walk *walk)
6056
{
6057
	int ret = 0;
6058
	struct vm_area_struct *vma = walk->vma;
6059 6060
	pte_t *pte;
	spinlock_t *ptl;
6061 6062 6063
	enum mc_target_type target_type;
	union mc_target target;
	struct page *page;
6064

6065 6066
	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
6067
		if (mc.precharge < HPAGE_PMD_NR) {
6068
			spin_unlock(ptl);
6069 6070 6071 6072 6073 6074
			return 0;
		}
		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
		if (target_type == MC_TARGET_PAGE) {
			page = target.page;
			if (!isolate_lru_page(page)) {
6075
				if (!mem_cgroup_move_account(page, true,
6076
							     mc.from, mc.to)) {
6077 6078 6079 6080 6081 6082
					mc.precharge -= HPAGE_PMD_NR;
					mc.moved_charge += HPAGE_PMD_NR;
				}
				putback_lru_page(page);
			}
			put_page(page);
6083 6084 6085 6086 6087 6088 6089 6090
		} else if (target_type == MC_TARGET_DEVICE) {
			page = target.page;
			if (!mem_cgroup_move_account(page, true,
						     mc.from, mc.to)) {
				mc.precharge -= HPAGE_PMD_NR;
				mc.moved_charge += HPAGE_PMD_NR;
			}
			put_page(page);
6091
		}
6092
		spin_unlock(ptl);
6093
		return 0;
6094 6095
	}

6096 6097
	if (pmd_trans_unstable(pmd))
		return 0;
6098 6099 6100 6101
retry:
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; addr += PAGE_SIZE) {
		pte_t ptent = *(pte++);
6102
		bool device = false;
6103
		swp_entry_t ent;
6104 6105 6106 6107

		if (!mc.precharge)
			break;

6108
		switch (get_mctgt_type(vma, addr, ptent, &target)) {
6109 6110
		case MC_TARGET_DEVICE:
			device = true;
			fallthrough;
6112 6113
		case MC_TARGET_PAGE:
			page = target.page;
6114 6115 6116 6117 6118 6119 6120 6121
			/*
			 * We can have a part of the split pmd here. Moving it
			 * can be done but it would be too convoluted so simply
			 * ignore such a partial THP and keep it in the original
			 * memcg. There should be somebody mapping the head.
			 */
			if (PageTransCompound(page))
				goto put;
6122
			if (!device && isolate_lru_page(page))
6123
				goto put;
6124 6125
			if (!mem_cgroup_move_account(page, false,
						mc.from, mc.to)) {
6126
				mc.precharge--;
6127 6128
				/* we uncharge from mc.from later. */
				mc.moved_charge++;
6129
			}
6130 6131
			if (!device)
				putback_lru_page(page);
6132
put:			/* get_mctgt_type() gets the page */
6133 6134
			put_page(page);
			break;
6135 6136
		case MC_TARGET_SWAP:
			ent = target.ent;
6137
			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
6138
				mc.precharge--;
6139 6140
				mem_cgroup_id_get_many(mc.to, 1);
				/* we fixup other refcnts and charges later. */
6141 6142
				mc.moved_swap++;
			}
6143
			break;
		default:
			break;
		}
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	if (addr != end) {
		/*
		 * We have consumed all precharges we got in can_attach().
		 * We try charge one by one, but don't do any additional
		 * charges to mc.to if we have failed in charge once in attach()
		 * phase.
		 */
6158
		ret = mem_cgroup_do_precharge(1);
6159 6160 6161 6162 6163 6164 6165
		if (!ret)
			goto retry;
	}

	return ret;
}

6166 6167 6168 6169
static const struct mm_walk_ops charge_walk_ops = {
	.pmd_entry	= mem_cgroup_move_charge_pte_range,
};

6170
static void mem_cgroup_move_charge(void)
6171 6172
{
	lru_add_drain_all();
6173
	/*
6174 6175 6176
	 * Signal lock_page_memcg() to take the memcg's move_lock
	 * while we're moving its pages to another memcg. Then wait
	 * for already started RCU-only updates to finish.
6177 6178 6179
	 */
	atomic_inc(&mc.from->moving_account);
	synchronize_rcu();
6180
retry:
6181
	if (unlikely(!mmap_read_trylock(mc.mm))) {
6182
		/*
6183
		 * Someone who is holding the mmap_lock might be waiting on
		 * the waitq. So we cancel all extra charges, wake up all waiters,
		 * and retry. Because we cancel precharges, we might not be able
		 * to move enough charges, but moving charge is a best-effort
		 * feature anyway, so it wouldn't be a big problem.
		 */
		__mem_cgroup_clear_mc();
		cond_resched();
		goto retry;
	}
6193 6194 6195 6196
	/*
	 * When we have consumed all precharges and failed in doing
	 * additional charge, the page walk just aborts.
	 */
6197 6198
	walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops,
			NULL);
6199

6200
	mmap_read_unlock(mc.mm);
6201
	atomic_dec(&mc.from->moving_account);
6202 6203
}

6204
static void mem_cgroup_move_task(void)
{
6206 6207
	if (mc.to) {
		mem_cgroup_move_charge();
6208
		mem_cgroup_clear_mc();
6209
	}
}
6211
#else	/* !CONFIG_MMU */
6212
static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6213 6214 6215
{
	return 0;
}
6216
static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6217 6218
{
}
6219
static void mem_cgroup_move_task(void)
6220 6221 6222
{
}
#endif

6224 6225
/*
 * Cgroup retains root cgroups across [un]mount cycles making it necessary
6226 6227
 * to verify whether we're attached to the default hierarchy on each mount
 * attempt.
6228
 */
6229
static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
6230 6231
{
	/*
6232
	 * use_hierarchy is forced on the default hierarchy.  cgroup core
6233 6234 6235
	 * guarantees that @root doesn't have any children, so turning it
	 * on for the root memcg is enough.
	 */
6236
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
6237 6238 6239
		root_mem_cgroup->use_hierarchy = true;
	else
		root_mem_cgroup->use_hierarchy = false;
6240 6241
}

static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
{
	if (value == PAGE_COUNTER_MAX)
		seq_puts(m, "max\n");
	else
		seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);

	return 0;
}

6252 6253 6254
static u64 memory_current_read(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
6255 6256 6257
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
6258 6259
}

static int memory_min_show(struct seq_file *m, void *v)
{
6262 6263
	return seq_puts_memcg_tunable(m,
		READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
}

static ssize_t memory_min_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long min;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &min);
	if (err)
		return err;

	page_counter_set_min(&memcg->memory, min);

	return nbytes;
}

6283 6284
static int memory_low_show(struct seq_file *m, void *v)
{
6285 6286
	return seq_puts_memcg_tunable(m,
		READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
}

static ssize_t memory_low_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long low;
	int err;

	buf = strstrip(buf);
6297
	err = page_counter_memparse(buf, "max", &low);
6298 6299 6300
	if (err)
		return err;

6301
	page_counter_set_low(&memcg->memory, low);
6302 6303 6304 6305 6306 6307

	return nbytes;
}

static int memory_high_show(struct seq_file *m, void *v)
{
6308 6309
	return seq_puts_memcg_tunable(m,
		READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
6310 6311 6312 6313 6314 6315
}

static ssize_t memory_high_write(struct kernfs_open_file *of,
				 char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6316
	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6317
	bool drained = false;
6318 6319 6320 6321
	unsigned long high;
	int err;

	buf = strstrip(buf);
6322
	err = page_counter_memparse(buf, "max", &high);
6323 6324 6325
	if (err)
		return err;

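	/*
	 * Try to bring usage down to the new high boundary before it is
	 * published below: drain the per-cpu stock once, then reclaim,
	 * giving up when reclaim makes no progress and the retry budget
	 * is exhausted, or when a signal is pending.
	 */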
	for (;;) {
		unsigned long nr_pages = page_counter_read(&memcg->memory);
		unsigned long reclaimed;

		if (nr_pages <= high)
			break;

		if (signal_pending(current))
			break;

		if (!drained) {
			drain_all_stock(memcg);
			drained = true;
			continue;
		}

		reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
							 GFP_KERNEL, true);

		if (!reclaimed && !nr_retries--)
			break;
	}
6348

6349 6350
	page_counter_set_high(&memcg->memory, high);

6351 6352
	memcg_wb_domain_size_changed(memcg);

6353 6354 6355 6356 6357
	return nbytes;
}

static int memory_max_show(struct seq_file *m, void *v)
{
6358 6359
	return seq_puts_memcg_tunable(m,
		READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
6360 6361 6362 6363 6364 6365
}

static ssize_t memory_max_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6366
	unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
6367
	bool drained = false;
6368 6369 6370 6371
	unsigned long max;
	int err;

	buf = strstrip(buf);
6372
	err = page_counter_memparse(buf, "max", &max);
6373 6374 6375
	if (err)
		return err;

6376
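	/*
	 * Publish the new limit first; the loop below then reclaims and, as
	 * a last resort, invokes the OOM killer until usage fits under the
	 * new limit or the writer receives a signal.
	 */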
	xchg(&memcg->memory.max, max);
6377 6378 6379 6380 6381 6382 6383

	for (;;) {
		unsigned long nr_pages = page_counter_read(&memcg->memory);

		if (nr_pages <= max)
			break;

6384
		if (signal_pending(current))
			break;

		if (!drained) {
			drain_all_stock(memcg);
			drained = true;
			continue;
		}

		if (nr_reclaims) {
			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
							  GFP_KERNEL, true))
				nr_reclaims--;
			continue;
		}

6400
		memcg_memory_event(memcg, MEMCG_OOM);
6401 6402 6403
		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
			break;
	}
6404

6405
	memcg_wb_domain_size_changed(memcg);
6406 6407 6408
	return nbytes;
}

static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
{
	seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
	seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
	seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
	seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
	seq_printf(m, "oom_kill %lu\n",
		   atomic_long_read(&events[MEMCG_OOM_KILL]));
}

6419 6420
static int memory_events_show(struct seq_file *m, void *v)
{
6421
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6422

6423 6424 6425 6426 6427 6428 6429
	__memory_events_show(m, memcg->memory_events);
	return 0;
}

static int memory_events_local_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6430

6431
	__memory_events_show(m, memcg->memory_events_local);
6432 6433 6434
	return 0;
}

6435 6436
static int memory_stat_show(struct seq_file *m, void *v)
{
6437
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6438
	char *buf;
6439

6440 6441 6442 6443 6444
	buf = memory_stat_format(memcg);
	if (!buf)
		return -ENOMEM;
	seq_puts(m, buf);
	kfree(buf);
6445 6446 6447
	return 0;
}

#ifdef CONFIG_NUMA
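/*
 * memory.numa_stat: one line per memory_stats[] entry that has a per-node
 * counter, in the form "<name> N0=<bytes> N1=<bytes> ...".
 */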
static int memory_numa_stat_show(struct seq_file *m, void *v)
{
	int i;
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);

	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
		int nid;

		if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
			continue;

		seq_printf(m, "%s", memory_stats[i].name);
		for_each_node_state(nid, N_MEMORY) {
			u64 size;
			struct lruvec *lruvec;

			lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
			size = lruvec_page_state(lruvec, memory_stats[i].idx);
			size *= memory_stats[i].ratio;
			seq_printf(m, " N%d=%llu", nid, size);
		}
		seq_putc(m, '\n');
	}

	return 0;
}
#endif

6477 6478
static int memory_oom_group_show(struct seq_file *m, void *v)
{
6479
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);

	seq_printf(m, "%d\n", memcg->oom_group);

	return 0;
}

static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
				      char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	int ret, oom_group;

	buf = strstrip(buf);
	if (!buf)
		return -EINVAL;

	ret = kstrtoint(buf, 0, &oom_group);
	if (ret)
		return ret;

	if (oom_group != 0 && oom_group != 1)
		return -EINVAL;

	memcg->oom_group = oom_group;

	return nbytes;
}

6508 6509 6510
static struct cftype memory_files[] = {
	{
		.name = "current",
6511
		.flags = CFTYPE_NOT_ON_ROOT,
6512 6513
		.read_u64 = memory_current_read,
	},
	{
		.name = "min",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_min_show,
		.write = memory_min_write,
	},
	{
		.name = "low",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_low_show,
		.write = memory_low_write,
	},
	{
		.name = "high",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_high_show,
		.write = memory_high_write,
	},
	{
		.name = "max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_max_show,
		.write = memory_max_write,
	},
	{
		.name = "events",
		.flags = CFTYPE_NOT_ON_ROOT,
6541
		.file_offset = offsetof(struct mem_cgroup, events_file),
6542 6543
		.seq_show = memory_events_show,
	},
6544 6545 6546 6547 6548 6549
	{
		.name = "events.local",
		.flags = CFTYPE_NOT_ON_ROOT,
		.file_offset = offsetof(struct mem_cgroup, events_local_file),
		.seq_show = memory_events_local_show,
	},
6550 6551 6552 6553
	{
		.name = "stat",
		.seq_show = memory_stat_show,
	},
6554 6555 6556 6557 6558 6559
#ifdef CONFIG_NUMA
	{
		.name = "numa_stat",
		.seq_show = memory_numa_stat_show,
	},
#endif
6560 6561 6562 6563 6564 6565
	{
		.name = "oom.group",
		.flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
		.seq_show = memory_oom_group_show,
		.write = memory_oom_group_write,
	},
6566 6567 6568
	{ }	/* terminate */
};

6569
struct cgroup_subsys memory_cgrp_subsys = {
6570
	.css_alloc = mem_cgroup_css_alloc,
6571
	.css_online = mem_cgroup_css_online,
6572
	.css_offline = mem_cgroup_css_offline,
6573
	.css_released = mem_cgroup_css_released,
6574
	.css_free = mem_cgroup_css_free,
6575
	.css_reset = mem_cgroup_css_reset,
6576 6577
	.can_attach = mem_cgroup_can_attach,
	.cancel_attach = mem_cgroup_cancel_attach,
6578
	.post_attach = mem_cgroup_move_task,
6579
	.bind = mem_cgroup_bind,
6580 6581
	.dfl_cftypes = memory_files,
	.legacy_cftypes = mem_cgroup_legacy_files,
6582
	.early_init = 0,
};
6584

/*
 * This function calculates an individual cgroup's effective
 * protection which is derived from its own memory.min/low, its
 * parent's and siblings' settings, as well as the actual memory
 * distribution in the tree.
 *
 * The following rules apply to the effective protection values:
 *
 * 1. At the first level of reclaim, effective protection is equal to
 *    the declared protection in memory.min and memory.low.
 *
 * 2. To enable safe delegation of the protection configuration, at
 *    subsequent levels the effective protection is capped to the
 *    parent's effective protection.
 *
 * 3. To make complex and dynamic subtrees easier to configure, the
 *    user is allowed to overcommit the declared protection at a given
 *    level. If that is the case, the parent's effective protection is
 *    distributed to the children in proportion to how much protection
 *    they have declared and how much of it they are utilizing.
 *
 *    This makes distribution proportional, but also work-conserving:
 *    if one cgroup claims much more protection than it uses memory,
 *    the unused remainder is available to its siblings.
 *
 * 4. Conversely, when the declared protection is undercommitted at a
 *    given level, the distribution of the larger parental protection
 *    budget is NOT proportional. A cgroup's protection from a sibling
 *    is capped to its own memory.min/low setting.
 *
 * 5. However, to allow protecting recursive subtrees from each other
 *    without having to declare each individual cgroup's fixed share
 *    of the ancestor's claim to protection, any unutilized -
 *    "floating" - protection from up the tree is distributed in
 *    proportion to each cgroup's *usage*. This makes the protection
 *    neutral wrt sibling cgroups and lets them compete freely over
 *    the shared parental protection budget, but it protects the
 *    subtree as a whole from neighboring subtrees.
 *
 * Note that 4. and 5. are not in conflict: 4. is about protecting
 * against immediate siblings whereas 5. is about protecting against
 * neighboring subtrees.
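 *
 * As a hypothetical worked example of rule 3: if a parent's effective
 * protection is 100 pages while its children together declare and use
 * 150 pages, a child declaring and using 75 of those ends up with an
 * effective protection of 75 * 100 / 150 = 50 pages.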
6627 6628
 */
static unsigned long effective_protection(unsigned long usage,
6629
					  unsigned long parent_usage,
6630 6631 6632 6633 6634
					  unsigned long setting,
					  unsigned long parent_effective,
					  unsigned long siblings_protected)
{
	unsigned long protected;
6635
	unsigned long ep;

	protected = min(usage, setting);
	/*
	 * If all cgroups at this level combined claim and use more
	 * protection than what the parent affords them, distribute
	 * shares in proportion to utilization.
	 *
	 * We are using actual utilization rather than the statically
	 * claimed protection in order to be work-conserving: claimed
	 * but unused protection is available to siblings that would
	 * otherwise get a smaller chunk than what they claimed.
	 */
	if (siblings_protected > parent_effective)
		return protected * parent_effective / siblings_protected;

	/*
	 * Ok, utilized protection of all children is within what the
	 * parent affords them, so we know whatever this child claims
	 * and utilizes is effectively protected.
	 *
	 * If there is unprotected usage beyond this value, reclaim
	 * will apply pressure in proportion to that amount.
	 *
	 * If there is unutilized protection, the cgroup will be fully
	 * shielded from reclaim, but we do return a smaller value for
	 * protection than what the group could enjoy in theory. This
	 * is okay. With the overcommit distribution above, effective
	 * protection is always dependent on how memory is actually
	 * consumed among the siblings anyway.
	 */
	ep = protected;

	/*
	 * If the children aren't claiming (all of) the protection
	 * afforded to them by the parent, distribute the remainder in
	 * proportion to the (unprotected) memory of each cgroup. That
	 * way, cgroups that aren't explicitly prioritized wrt each
	 * other compete freely over the allowance, but they are
	 * collectively protected from neighboring trees.
	 *
	 * We're using unprotected memory for the weight so that if
	 * some cgroups DO claim explicit protection, we don't protect
	 * the same bytes twice.
6679 6680 6681 6682
	 *
	 * Check both usage and parent_usage against the respective
	 * protected values. One should imply the other, but they
	 * aren't read atomically - make sure the division is sane.
6683 6684 6685
	 */
	if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
		return ep;
6686 6687 6688
	if (parent_effective > siblings_protected &&
	    parent_usage > siblings_protected &&
	    usage > protected) {
		unsigned long unclaimed;

		unclaimed = parent_effective - siblings_protected;
		unclaimed *= usage - protected;
		unclaimed /= parent_usage - siblings_protected;

		ep += unclaimed;
	}

	return ep;
6699 6700
}

6701
/**
 * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
 * @root: the top ancestor of the sub-tree being checked
6704 6705
 * @memcg: the memory cgroup to check
 *
6706 6707
 * WARNING: This function is not stateless! It can only be used as part
 *          of a top-down tree iteration, not for isolated queries.
6708
 */
6709 6710
void mem_cgroup_calculate_protection(struct mem_cgroup *root,
				     struct mem_cgroup *memcg)
6711
{
6712
	unsigned long usage, parent_usage;
6713 6714
	struct mem_cgroup *parent;

6715
	if (mem_cgroup_disabled())
6716
		return;
6717

6718 6719
	if (!root)
		root = root_mem_cgroup;
6720 6721 6722 6723 6724 6725 6726 6727

	/*
	 * Effective values of the reclaim targets are ignored so they
	 * can be stale. Have a look at mem_cgroup_protection for more
	 * details.
	 * TODO: calculation should be more robust so that we do not need
	 * that special casing.
	 */
6728
	if (memcg == root)
6729
		return;
6730

6731
	usage = page_counter_read(&memcg->memory);
	if (!usage)
6733
		return;

	parent = parent_mem_cgroup(memcg);
6736 6737
	/* No parent means a non-hierarchical mode on v1 memcg */
	if (!parent)
6738
		return;
6739

6740
	if (parent == root) {
6741
		memcg->memory.emin = READ_ONCE(memcg->memory.min);
6742
		memcg->memory.elow = READ_ONCE(memcg->memory.low);
6743
		return;
	}

6746 6747
	parent_usage = page_counter_read(&parent->memory);

6748
	WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage,
6749 6750
			READ_ONCE(memcg->memory.min),
			READ_ONCE(parent->memory.emin),
6751
			atomic_long_read(&parent->memory.children_min_usage)));
6752

6753
	WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
6754 6755
			READ_ONCE(memcg->memory.low),
			READ_ONCE(parent->memory.elow),
6756
			atomic_long_read(&parent->memory.children_low_usage)));
6757 6758
}

6759
/**
6760
 * mem_cgroup_charge - charge a newly allocated page to a cgroup
6761 6762 6763 6764 6765 6766 6767
 * @page: page to charge
 * @mm: mm context of the victim
 * @gfp_mask: reclaim mode
 *
 * Try to charge @page to the memcg that @mm belongs to, reclaiming
 * pages according to @gfp_mask if necessary.
 *
6768
 * Returns 0 on success. Otherwise, an error code is returned.
6769
 */
6770
int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
6771
{
6772
	unsigned int nr_pages = thp_nr_pages(page);
6773 6774 6775 6776 6777 6778 6779
	struct mem_cgroup *memcg = NULL;
	int ret = 0;

	if (mem_cgroup_disabled())
		goto out;

	if (PageSwapCache(page)) {
6780 6781 6782
		swp_entry_t ent = { .val = page_private(page), };
		unsigned short id;

6783 6784 6785
		/*
		 * Every swap fault against a single page tries to charge the
		 * page, bail as early as possible.  shmem_unuse() encounters
6786 6787
		 * already charged pages, too.  page->mem_cgroup is protected
		 * by the page lock, which serializes swap cache removal, which
6788 6789
		 * in turn serializes uncharging.
		 */
6790
		VM_BUG_ON_PAGE(!PageLocked(page), page);
6791
		if (compound_head(page)->mem_cgroup)
6792
			goto out;
6793

6794 6795 6796 6797 6798 6799
		id = lookup_swap_cgroup_id(ent);
		rcu_read_lock();
		memcg = mem_cgroup_from_id(id);
		if (memcg && !css_tryget_online(&memcg->css))
			memcg = NULL;
		rcu_read_unlock();
6800 6801 6802 6803 6804 6805
	}

	if (!memcg)
		memcg = get_mem_cgroup_from_mm(mm);

	ret = try_charge(memcg, gfp_mask, nr_pages);
6806 6807
	if (ret)
		goto out_put;
6808

6809
	css_get(&memcg->css);
6810
	commit_charge(page, memcg);
6811 6812

	local_irq_disable();
6813
	mem_cgroup_charge_statistics(memcg, page, nr_pages);
6814 6815
	memcg_check_events(memcg, page);
	local_irq_enable();
6816

6817
	if (PageSwapCache(page)) {
6818 6819 6820 6821 6822 6823
		swp_entry_t entry = { .val = page_private(page) };
		/*
		 * The swap entry might not get freed for a long time,
		 * let's not wait for it.  The page already received a
		 * memory+swap charge, drop the swap entry duplicate.
		 */
6824
		mem_cgroup_uncharge_swap(entry, nr_pages);
6825 6826
	}

6827 6828 6829 6830
out_put:
	css_put(&memcg->css);
out:
	return ret;
6831 6832
}

6833 6834
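/*
 * uncharge_gather batches the uncharge of consecutive pages that belong to
 * the same memcg, so the page counters and statistics are updated once per
 * batch instead of once per page.
 */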
struct uncharge_gather {
	struct mem_cgroup *memcg;
6835
	unsigned long nr_pages;
6836 6837 6838 6839 6840 6841
	unsigned long pgpgout;
	unsigned long nr_kmem;
	struct page *dummy_page;
};

static inline void uncharge_gather_clear(struct uncharge_gather *ug)
6842
{
6843 6844 6845 6846 6847
	memset(ug, 0, sizeof(*ug));
}

static void uncharge_batch(const struct uncharge_gather *ug)
{
6848 6849
	unsigned long flags;

6850
	if (!mem_cgroup_is_root(ug->memcg)) {
6851
		page_counter_uncharge(&ug->memcg->memory, ug->nr_pages);
6852
		if (do_memsw_account())
6853
			page_counter_uncharge(&ug->memcg->memsw, ug->nr_pages);
6854 6855 6856
		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem)
			page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem);
		memcg_oom_recover(ug->memcg);
6857
	}
6858 6859

	local_irq_save(flags);
6860
	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
6861
	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages);
6862
	memcg_check_events(ug->memcg, ug->dummy_page);
6863
	local_irq_restore(flags);
6864 6865 6866

	/* drop reference from uncharge_page */
	css_put(&ug->memcg->css);
6867 6868 6869 6870
}

static void uncharge_page(struct page *page, struct uncharge_gather *ug)
{
6871 6872
	unsigned long nr_pages;

6873 6874 6875 6876 6877 6878 6879 6880 6881 6882 6883 6884 6885 6886 6887 6888 6889
	VM_BUG_ON_PAGE(PageLRU(page), page);

	if (!page->mem_cgroup)
		return;

	/*
	 * Nobody should be changing or seriously looking at
	 * page->mem_cgroup at this point, we have fully
	 * exclusive access to the page.
	 */

	if (ug->memcg != page->mem_cgroup) {
		if (ug->memcg) {
			uncharge_batch(ug);
			uncharge_gather_clear(ug);
		}
		ug->memcg = page->mem_cgroup;
6890 6891 6892

		/* pairs with css_put in uncharge_batch */
		css_get(&ug->memcg->css);
6893 6894
	}

6895 6896
	nr_pages = compound_nr(page);
	ug->nr_pages += nr_pages;
6897

6898
	if (!PageKmemcg(page)) {
6899 6900
		ug->pgpgout++;
	} else {
6901
		ug->nr_kmem += nr_pages;
6902 6903 6904 6905 6906
		__ClearPageKmemcg(page);
	}

	ug->dummy_page = page;
	page->mem_cgroup = NULL;
6907
	css_put(&ug->memcg->css);
6908 6909 6910 6911
}

static void uncharge_list(struct list_head *page_list)
{
6912
	struct uncharge_gather ug;
6913
	struct list_head *next;
6914 6915

	uncharge_gather_clear(&ug);
6916

6917 6918 6919 6920
	/*
	 * Note that the list can be a single page->lru; hence the
	 * do-while loop instead of a simple list_for_each_entry().
	 */
6921 6922
	next = page_list->next;
	do {
6923 6924
		struct page *page;

6925 6926 6927
		page = list_entry(next, struct page, lru);
		next = page->lru.next;

6928
		uncharge_page(page, &ug);
6929 6930
	} while (next != page_list);

6931 6932
	if (ug.memcg)
		uncharge_batch(&ug);
6933 6934
}

6935 6936 6937 6938
/**
 * mem_cgroup_uncharge - uncharge a page
 * @page: page to uncharge
 *
6939
 * Uncharge a page previously charged with mem_cgroup_charge().
6940 6941 6942
 */
void mem_cgroup_uncharge(struct page *page)
{
6943 6944
	struct uncharge_gather ug;

6945 6946 6947
	if (mem_cgroup_disabled())
		return;

6948
	/* Don't touch page->lru of any random page, pre-check: */
6949
	if (!page->mem_cgroup)
6950 6951
		return;

6952 6953 6954
	uncharge_gather_clear(&ug);
	uncharge_page(page, &ug);
	uncharge_batch(&ug);
6955
}
6956

6957 6958 6959 6960 6961
/**
 * mem_cgroup_uncharge_list - uncharge a list of pages
 * @page_list: list of pages to uncharge
 *
 * Uncharge a list of pages previously charged with
6962
 * mem_cgroup_charge().
6963 6964 6965 6966 6967
 */
void mem_cgroup_uncharge_list(struct list_head *page_list)
{
	if (mem_cgroup_disabled())
		return;
6968

6969 6970
	if (!list_empty(page_list))
		uncharge_list(page_list);
6971 6972 6973
}

/**
6974 6975 6976
 * mem_cgroup_migrate - charge a page's replacement
 * @oldpage: currently circulating page
 * @newpage: replacement page
6977
 *
6978 6979
 * Charge @newpage as a replacement page for @oldpage. @oldpage will
 * be uncharged upon free.
6980 6981 6982
 *
 * Both pages must be locked, @newpage->mapping must be set up.
 */
6983
void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
6984
{
6985
	struct mem_cgroup *memcg;
6986
	unsigned int nr_pages;
6987
	unsigned long flags;
6988 6989 6990 6991

	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
	VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
6992 6993
	VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
		       newpage);
6994 6995 6996 6997 6998

	if (mem_cgroup_disabled())
		return;

	/* Page cache replacement: new page already charged? */
6999
	if (newpage->mem_cgroup)
7000 7001
		return;

7002
	/* Swapcache readahead pages can get replaced before being charged */
7003
	memcg = oldpage->mem_cgroup;
7004
	if (!memcg)
7005 7006
		return;

7007
	/* Force-charge the new page. The old one will be freed soon */
7008
	nr_pages = thp_nr_pages(newpage);
7009 7010 7011 7012

	page_counter_charge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_charge(&memcg->memsw, nr_pages);
7013

7014
	css_get(&memcg->css);
7015
	commit_charge(newpage, memcg);
7016

7017
	local_irq_save(flags);
7018
	mem_cgroup_charge_statistics(memcg, newpage, nr_pages);
7019
	memcg_check_events(memcg, newpage);
7020
	local_irq_restore(flags);
7021 7022
}

7023
DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
7024 7025
EXPORT_SYMBOL(memcg_sockets_enabled_key);

7026
void mem_cgroup_sk_alloc(struct sock *sk)
7027 7028 7029
{
	struct mem_cgroup *memcg;

7030 7031 7032
	if (!mem_cgroup_sockets_enabled)
		return;

7033 7034 7035 7036
	/* Do not associate the sock with unrelated interrupted task's memcg. */
	if (in_interrupt())
		return;

7037 7038
	rcu_read_lock();
	memcg = mem_cgroup_from_task(current);
7039 7040
	if (memcg == root_mem_cgroup)
		goto out;
7041
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
7042
		goto out;
	if (css_tryget(&memcg->css))
7044
		sk->sk_memcg = memcg;
7045
out:
7046 7047 7048
	rcu_read_unlock();
}

7049
void mem_cgroup_sk_free(struct sock *sk)
7050
{
7051 7052
	if (sk->sk_memcg)
		css_put(&sk->sk_memcg->css);
}

/**
 * mem_cgroup_charge_skmem - charge socket memory
 * @memcg: memcg to charge
 * @nr_pages: number of pages to charge
 *
 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
 * @memcg's configured limit, %false if the charge had to be forced.
 */
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
{
7065
	gfp_t gfp_mask = GFP_KERNEL;
7066

7067
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7068
		struct page_counter *fail;
7069

7070 7071
		if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
			memcg->tcpmem_pressure = 0;
7072 7073
			return true;
		}
7074 7075
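		/*
		 * The charge exceeded the limit: force it through anyway, flag
		 * tcpmem pressure, and let the %false return tell the caller
		 * that the limit was breached.
		 */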
		page_counter_charge(&memcg->tcpmem, nr_pages);
		memcg->tcpmem_pressure = 1;
7076
		return false;
7077
	}
7078

7079 7080 7081 7082
	/* Don't block in the packet receive path */
	if (in_softirq())
		gfp_mask = GFP_NOWAIT;

7083
	mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
7084

7085 7086 7087 7088
	if (try_charge(memcg, gfp_mask, nr_pages) == 0)
		return true;

	try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
7089 7090 7091 7092 7093
	return false;
}

/**
 * mem_cgroup_uncharge_skmem - uncharge socket memory
 * @memcg: memcg to uncharge
 * @nr_pages: number of pages to uncharge
7096 7097 7098
 */
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
{
7099
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7100
		page_counter_uncharge(&memcg->tcpmem, nr_pages);
7101 7102
		return;
	}
7103

7104
	mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
7105

7106
	refill_stock(memcg, nr_pages);
7107 7108
}

static int __init cgroup_memory(char *s)
{
	char *token;

	while ((token = strsep(&s, ",")) != NULL) {
		if (!*token)
			continue;
		if (!strcmp(token, "nosocket"))
			cgroup_memory_nosocket = true;
7118 7119
		if (!strcmp(token, "nokmem"))
			cgroup_memory_nokmem = true;
7120 7121 7122 7123
	}
	return 0;
}
__setup("cgroup.memory=", cgroup_memory);
7124

7125
/*
7126 7127
 * subsys_initcall() for memory controller.
 *
7128 7129 7130 7131
 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
 * basically everything that doesn't depend on a specific mem_cgroup structure
 * should be initialized from here.
7132 7133 7134
 */
static int __init mem_cgroup_init(void)
{
7135 7136
	int cpu, node;

7137 7138
	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
				  memcg_hotplug_cpu_dead);

	for_each_possible_cpu(cpu)
		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
			  drain_local_stock);

	for_each_node(node) {
		struct mem_cgroup_tree_per_node *rtpn;

		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
				    node_online(node) ? node : NUMA_NO_NODE);

7150
		rtpn->rb_root = RB_ROOT;
7151
		rtpn->rb_rightmost = NULL;
7152
		spin_lock_init(&rtpn->lock);
7153 7154 7155
		soft_limit_tree.rb_tree_per_node[node] = rtpn;
	}

7156 7157 7158
	return 0;
}
subsys_initcall(mem_cgroup_init);
7159 7160

#ifdef CONFIG_MEMCG_SWAP
7161 7162
static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
{
7163
	while (!refcount_inc_not_zero(&memcg->id.ref)) {
		/*
		 * The root cgroup cannot be destroyed, so its refcount must
		 * always be >= 1.
		 */
		if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
			VM_BUG_ON(1);
			break;
		}
		memcg = parent_mem_cgroup(memcg);
		if (!memcg)
			memcg = root_mem_cgroup;
	}
	return memcg;
}

/**
 * mem_cgroup_swapout - transfer a memsw charge to swap
 * @page: page whose memsw charge to transfer
 * @entry: swap entry to move the charge to
 *
 * Transfer the memsw charge of @page to @entry.
 */
void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
7188
	struct mem_cgroup *memcg, *swap_memcg;
7189
	unsigned int nr_entries;
7190 7191 7192 7193 7194
	unsigned short oldid;

	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(page_count(page), page);

7195
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return;

	memcg = page->mem_cgroup;

	/* Readahead page, never charged */
	if (!memcg)
		return;

	/*
	 * In case the memcg owning these pages has been offlined and doesn't
	 * have an ID allocated to it anymore, charge the closest online
	 * ancestor for the swap instead and transfer the memory+swap charge.
	 */
	swap_memcg = mem_cgroup_id_get_online(memcg);
	nr_entries = thp_nr_pages(page);
	/* Get references for the tail pages, too */
	if (nr_entries > 1)
		mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
				   nr_entries);
	VM_BUG_ON_PAGE(oldid, page);
	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);

	page->mem_cgroup = NULL;

	if (!mem_cgroup_is_root(memcg))
		page_counter_uncharge(&memcg->memory, nr_entries);

	if (!cgroup_memory_noswap && memcg != swap_memcg) {
		if (!mem_cgroup_is_root(swap_memcg))
			page_counter_charge(&swap_memcg->memsw, nr_entries);
		page_counter_uncharge(&memcg->memsw, nr_entries);
	}

	/*
	 * Interrupts should be disabled here because the caller holds the
	 * i_pages lock which is taken with interrupts-off. It is
	 * important here to have the interrupts disabled because it is the
	 * only synchronisation we have for updating the per-CPU variables.
	 */
	VM_BUG_ON(!irqs_disabled());
	mem_cgroup_charge_statistics(memcg, page, -nr_entries);
	memcg_check_events(memcg, page);

	css_put(&memcg->css);
}
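
/*
 * Caller context (an editor's note restating the locking comment above, not a
 * caller defined in this file): this is expected to run from the reclaim path
 * with the page's i_pages lock held and interrupts disabled, conceptually:
 *
 *	xa_lock_irq(&mapping->i_pages);
 *	mem_cgroup_swapout(page, entry);
 *	... remove the page from the swap cache ...
 *	xa_unlock_irq(&mapping->i_pages);
 */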

/**
 * mem_cgroup_try_charge_swap - try charging swap space for a page
 * @page: page being added to swap
 * @entry: swap entry to charge
 *
 * Try to charge @page's memcg for the swap space at @entry.
 *
 * Returns 0 on success, -ENOMEM on failure.
 */
int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
{
	unsigned int nr_pages = thp_nr_pages(page);
	struct page_counter *counter;
	struct mem_cgroup *memcg;
	unsigned short oldid;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return 0;

	memcg = page->mem_cgroup;

	/* Readahead page, never charged */
	if (!memcg)
		return 0;

	if (!entry.val) {
		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
		return 0;
	}

	memcg = mem_cgroup_id_get_online(memcg);

	if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg) &&
	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
		memcg_memory_event(memcg, MEMCG_SWAP_MAX);
		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
		mem_cgroup_id_put(memcg);
		return -ENOMEM;
	}

	/* Get references for the tail pages, too */
	if (nr_pages > 1)
		mem_cgroup_id_get_many(memcg, nr_pages - 1);
	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
	VM_BUG_ON_PAGE(oldid, page);
	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);

	return 0;
}
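
/*
 * Pairing sketch (an assumption, not a caller defined in this file): the swap
 * allocator is expected to charge when it hands out an entry for a page and
 * to drop the slot again on failure, e.g.:
 *
 *	entry = ...allocate a swap slot...;
 *	if (mem_cgroup_try_charge_swap(page, entry)) {
 *		...release the just-allocated slot...
 *		entry.val = 0;
 *	}
 *
 * The charge recorded here is released by mem_cgroup_uncharge_swap() below
 * once the swap entry itself is freed.
 */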

/**
 * mem_cgroup_uncharge_swap - uncharge swap space
 * @entry: swap entry to uncharge
 * @nr_pages: the amount of swap space to uncharge
 */
void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
{
	struct mem_cgroup *memcg;
	unsigned short id;

	id = swap_cgroup_record(entry, 0, nr_pages);
	rcu_read_lock();
	memcg = mem_cgroup_from_id(id);
	if (memcg) {
		if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg)) {
			if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
				page_counter_uncharge(&memcg->swap, nr_pages);
			else
				page_counter_uncharge(&memcg->memsw, nr_pages);
		}
		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
		mem_cgroup_id_put_many(memcg, nr_pages);
	}
	rcu_read_unlock();
}

long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	long nr_swap_pages = get_nr_swap_pages();

	if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return nr_swap_pages;
	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
		nr_swap_pages = min_t(long, nr_swap_pages,
				      READ_ONCE(memcg->swap.max) -
				      page_counter_read(&memcg->swap));
	return nr_swap_pages;
}
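
/*
 * Worked example with made-up numbers: if get_nr_swap_pages() reports 1000
 * free pages, this memcg has swap.max = 600 with 500 pages already charged,
 * and every ancestor is unlimited, the walk above yields
 * min(1000, 600 - 500) = 100 pages still usable by this cgroup.
 */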

bool mem_cgroup_swap_full(struct page *page)
{
	struct mem_cgroup *memcg;

	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (vm_swap_full())
		return true;
	if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return false;

	memcg = page->mem_cgroup;
	if (!memcg)
		return false;

	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
		unsigned long usage = page_counter_read(&memcg->swap);

		if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
		    usage * 2 >= READ_ONCE(memcg->swap.max))
			return true;
	}

	return false;
}
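
/*
 * Example of the 50% rule above with made-up numbers: swap.high/swap.max of
 * 200 pages and 110 pages charged gives 110 * 2 >= 200, so the page is
 * treated as if swap were full and reclaim is expected to prefer freeing its
 * swap cache copy.
 */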

static int __init setup_swap_account(char *s)
{
	if (!strcmp(s, "1"))
		cgroup_memory_noswap = 0;
	else if (!strcmp(s, "0"))
		cgroup_memory_noswap = 1;
	return 1;
}
__setup("swapaccount=", setup_swap_account);
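
/*
 * Example (editor's note): booting with "swapaccount=0" sets
 * cgroup_memory_noswap, so mem_cgroup_swap_init() below registers no swap
 * control files; "swapaccount=1" clears it, matching the default when
 * CONFIG_MEMCG_SWAP is built in.
 */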

static u64 swap_current_read(struct cgroup_subsys_state *css,
			     struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
}

static int swap_high_show(struct seq_file *m, void *v)
{
	return seq_puts_memcg_tunable(m,
		READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
}

static ssize_t swap_high_write(struct kernfs_open_file *of,
			       char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long high;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &high);
	if (err)
		return err;

	page_counter_set_high(&memcg->swap, high);

	return nbytes;
}

static int swap_max_show(struct seq_file *m, void *v)
{
	return seq_puts_memcg_tunable(m,
		READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
}

static ssize_t swap_max_write(struct kernfs_open_file *of,
			      char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long max;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &max);
	if (err)
		return err;

	xchg(&memcg->swap.max, max);

	return nbytes;
}
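
/*
 * Userspace view (a sketch; the mount point is an assumption): the handler
 * pairs above back the cgroup v2 files memory.swap.high and memory.swap.max,
 * e.g.
 *
 *	echo 512M > /sys/fs/cgroup/<group>/memory.swap.max
 *	echo max > /sys/fs/cgroup/<group>/memory.swap.high
 */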

static int swap_events_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);

	seq_printf(m, "high %lu\n",
		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
	seq_printf(m, "max %lu\n",
		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
	seq_printf(m, "fail %lu\n",
		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));

	return 0;
}

static struct cftype swap_files[] = {
	{
		.name = "swap.current",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = swap_current_read,
	},
	{
		.name = "swap.high",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = swap_high_show,
		.write = swap_high_write,
	},
	{
		.name = "swap.max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = swap_max_show,
		.write = swap_max_write,
	},
	{
		.name = "swap.events",
		.flags = CFTYPE_NOT_ON_ROOT,
		.file_offset = offsetof(struct mem_cgroup, swap_events_file),
		.seq_show = swap_events_show,
	},
	{ }	/* terminate */
};

static struct cftype memsw_files[] = {
	{
		.name = "memsw.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.failcnt",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{ },	/* terminate */
};

/*
 * If mem_cgroup_swap_init() were a subsys_initcall() instead of a
 * core_initcall(), cgroup_memory_noswap could still be false even when memcg
 * has been disabled via the "cgroup_disable=memory" boot parameter. That may
 * result in a premature OOPS inside mem_cgroup_get_nr_swap_pages() in corner
 * cases.
 */
static int __init mem_cgroup_swap_init(void)
{
	/* No memory control -> no swap control */
	if (mem_cgroup_disabled())
		cgroup_memory_noswap = true;

	if (cgroup_memory_noswap)
		return 0;

	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
	WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));

	return 0;
}
core_initcall(mem_cgroup_swap_init);

#endif /* CONFIG_MEMCG_SWAP */