// SPDX-License-Identifier: GPL-2.0-or-later
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 */

#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/vm_event_item.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include <linux/tracehook.h>
#include <linux/psi.h>
#include <linux/seq_buf.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"

#include <linux/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;

#define MEM_CGROUP_RECLAIM_RETRIES	5

/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket;

/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem;

/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
bool cgroup_memory_noswap __read_mostly;
#else
#define cgroup_memory_noswap		1
#endif

#ifdef CONFIG_CGROUP_WRITEBACK
static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
#endif

/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_noswap;
}

#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024

/*
 * Cgroups above their limits are maintained in an RB-tree, independent of
 * their hierarchy representation.
 */

struct mem_cgroup_tree_per_node {
	struct rb_root rb_root;
	struct rb_node *rb_rightmost;
	spinlock_t lock;
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these is stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * register_event() callback will be used to add a new userspace
	 * waiter for changes related to this event.  Use eventfd_signal()
	 * on eventfd to send notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace closes
	 * the eventfd or when the cgroup is removed.  This callback must be
	 * set if you want to provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below are needed to unregister the event when
	 * userspace closes the eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_entry_t wait;
	struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/* Stuff for moving charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON	0x1U
#define MOVE_FILE	0x2U
#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mm_struct  *mm;
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

202 203
enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
204
	MEM_CGROUP_CHARGE_TYPE_ANON,
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
207 208 209
	NR_CHARGE_TYPE,
};

210
/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
215
	_KMEM,
	_TCP,
};

219 220
#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
221
#define MEMFILE_ATTR(val)	((val) & 0xffff)
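/*
 * For illustration: the res_type lands in the upper 16 bits and the
 * attribute in the lower 16, so e.g. MEMFILE_PRIVATE(_MEMSWAP, 0)
 * evaluates to (1 << 16), and MEMFILE_TYPE()/MEMFILE_ATTR() recover
 * the two halves again.
 */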
/* Used for OOM notifier */
#define OOM_CONTROL		(0)
224

/*
 * Iteration constructs for visiting all cgroups (under a tree).  If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))
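/*
 * Typical usage of the iterators above (a sketch; some_condition() is a
 * stand-in for whatever the caller actually tests):
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (some_condition(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */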

240 241 242 243 244 245
static inline bool should_force_charge(void)
{
	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
		(current->flags & PF_EXITING);
}

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

259
#ifdef CONFIG_MEMCG_KMEM
260
/*
261
 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
 * The main reason for not using cgroup id for this:
 *  this works better in sparse environments, where we have a lot of memcgs,
 *  but only a few kmem-limited. Or also, if we have, for instance, 200
 *  memcgs, and none but the 200th is kmem-limited, we'd have to have a
 *  200 entry array for that.
267
 *
268 269
 * The current size of the caches array is stored in memcg_nr_cache_ids. It
 * will double each time we have to increase it.
270
 */
271 272
static DEFINE_IDA(memcg_cache_ida);
int memcg_nr_cache_ids;
273

/* Protects memcg_nr_cache_ids */
static DECLARE_RWSEM(memcg_cache_ids_sem);

void memcg_get_cache_ids(void)
{
	down_read(&memcg_cache_ids_sem);
}

void memcg_put_cache_ids(void)
{
	up_read(&memcg_cache_ids_sem);
}

287 288 289 290 291 292
/*
 * MIN_SIZE is different than 1, because we would like to avoid going through
 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 * cgroups is a reasonable guess. In the future, it could be a parameter or
 * tunable, but that is strictly not necessary.
 *
L
294 295
 * this constant directly from cgroup, but it is understandable that this is
 * better kept as an internal representation in cgroup.c. In any case, the
L
297 298 299
 * increase ours as well if it increases.
 */
#define MEMCG_CACHES_MIN_SIZE 4
#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
301

302 303 304 305 306 307
/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
 * conditional to this static branch, we'll have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well
 */
308
DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
309
EXPORT_SYMBOL(memcg_kmem_enabled_key);
310

311
struct workqueue_struct *memcg_kmem_cache_wq;
312
#endif
313

static int memcg_shrinker_map_size;
static DEFINE_MUTEX(memcg_shrinker_map_mutex);

static void memcg_free_shrinker_map_rcu(struct rcu_head *head)
{
	kvfree(container_of(head, struct memcg_shrinker_map, rcu));
}

static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg,
					 int size, int old_size)
{
	struct memcg_shrinker_map *new, *old;
	int nid;

	lockdep_assert_held(&memcg_shrinker_map_mutex);

	for_each_node(nid) {
		old = rcu_dereference_protected(
			mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true);
		/* Not yet online memcg */
		if (!old)
			return 0;

337
		new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid);
		if (!new)
			return -ENOMEM;

		/* Set all old bits, clear all new bits */
		memset(new->map, (int)0xff, old_size);
		memset((void *)new->map + old_size, 0, size - old_size);

		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new);
		call_rcu(&old->rcu, memcg_free_shrinker_map_rcu);
	}

	return 0;
}
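/*
 * Note on the expansion above: the old portion of the new map is filled
 * with all ones rather than copied, so a shrinker bit that was set
 * concurrently can never be lost; a spuriously set bit merely costs one
 * extra scan of that shrinker.
 */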

static void memcg_free_shrinker_maps(struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_node *pn;
	struct memcg_shrinker_map *map;
	int nid;

	if (mem_cgroup_is_root(memcg))
		return;

	for_each_node(nid) {
		pn = mem_cgroup_nodeinfo(memcg, nid);
		map = rcu_dereference_protected(pn->shrinker_map, true);
		if (map)
			kvfree(map);
		rcu_assign_pointer(pn->shrinker_map, NULL);
	}
}

static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
{
	struct memcg_shrinker_map *map;
	int nid, size, ret = 0;

	if (mem_cgroup_is_root(memcg))
		return 0;

	mutex_lock(&memcg_shrinker_map_mutex);
	size = memcg_shrinker_map_size;
	for_each_node(nid) {
381
		map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid);
		if (!map) {
			memcg_free_shrinker_maps(memcg);
			ret = -ENOMEM;
			break;
		}
		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map);
	}
	mutex_unlock(&memcg_shrinker_map_mutex);

	return ret;
}

int memcg_expand_shrinker_maps(int new_id)
{
	int size, old_size, ret = 0;
	struct mem_cgroup *memcg;

	size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
	old_size = memcg_shrinker_map_size;
	if (size <= old_size)
		return 0;

	mutex_lock(&memcg_shrinker_map_mutex);
	if (!root_mem_cgroup)
		goto unlock;

	for_each_mem_cgroup(memcg) {
		if (mem_cgroup_is_root(memcg))
			continue;
		ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
412 413
		if (ret) {
			mem_cgroup_iter_break(NULL, memcg);
414
			goto unlock;
415
		}
416 417 418 419 420 421 422
	}
unlock:
	if (!ret)
		memcg_shrinker_map_size = size;
	mutex_unlock(&memcg_shrinker_map_mutex);
	return ret;
}
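/*
 * Size arithmetic above, for illustration: with 64-bit longs, a new
 * shrinker id of 100 gives DIV_ROUND_UP(101, 64) = 2 longs, i.e. a
 * 16-byte map per node.  memcg_shrinker_map_size only ever grows.
 */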
423 424 425 426 427 428 429 430

void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
{
	if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
		struct memcg_shrinker_map *map;

		rcu_read_lock();
		map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
431 432
		/* Pairs with smp mb in shrink_slab() */
		smp_mb__before_atomic();
433 434 435 436 437
		set_bit(shrinker_id, map->map);
		rcu_read_unlock();
	}
}

/**
 * mem_cgroup_css_from_page - css of the memcg associated with a page
 * @page: page of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @page is returned.  The returned css remains associated with @page
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
{
	struct mem_cgroup *memcg;

	memcg = page->mem_cgroup;

455
	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
456 457 458 459 460
		memcg = root_mem_cgroup;

	return &memcg->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it only should be used by callers that
 * do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
480
	if (PageSlab(page) && !PageTail(page))
481 482 483
		memcg = memcg_from_slab_page(page);
	else
		memcg = READ_ONCE(page->mem_cgroup);
484 485 486 487 488 489 490 491
	while (memcg && !(memcg->css.flags & CSS_ONLINE))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}

492 493
static struct mem_cgroup_per_node *
mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
494
{
495
	int nid = page_to_nid(page);
496

497
	return memcg->nodeinfo[nid];
498 499
}

500 501
static struct mem_cgroup_tree_per_node *
soft_limit_tree_node(int nid)
502
{
503
	return soft_limit_tree.rb_tree_per_node[nid];
504 505
}

506
static struct mem_cgroup_tree_per_node *
507 508 509 510
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);

511
	return soft_limit_tree.rb_tree_per_node[nid];
512 513
}

514 515
static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz,
516
					 unsigned long new_usage_in_excess)
517 518 519
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
520
	struct mem_cgroup_per_node *mz_node;
521
	bool rightmost = true;
522 523 524 525 526 527 528 529 530

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
531
		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
532
					tree_node);
533
		if (mz->usage_in_excess < mz_node->usage_in_excess) {
534
			p = &(*p)->rb_left;
535 536 537
			rightmost = false;
		}

538 539 540 541 542 543 544
		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}
545 546 547 548

	if (rightmost)
		mctz->rb_rightmost = &mz->tree_node;

549 550 551 552 553
	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

554 555
static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz)
556 557 558
{
	if (!mz->on_tree)
		return;
559 560 561 562

	if (&mz->tree_node == mctz->rb_rightmost)
		mctz->rb_rightmost = rb_prev(&mz->tree_node);

563 564 565 566
	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

567 568
static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
				       struct mem_cgroup_tree_per_node *mctz)
569
{
570 571 572
	unsigned long flags;

	spin_lock_irqsave(&mctz->lock, flags);
573
	__mem_cgroup_remove_exceeded(mz, mctz);
574
	spin_unlock_irqrestore(&mctz->lock, flags);
575 576
}

577 578 579
static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
	unsigned long nr_pages = page_counter_read(&memcg->memory);
580
	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
581 582 583 584 585 586 587
	unsigned long excess = 0;

	if (nr_pages > soft_limit)
		excess = nr_pages - soft_limit;

	return excess;
}
588 589 590

static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
591
	unsigned long excess;
592 593
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup_tree_per_node *mctz;
594

595
	mctz = soft_limit_tree_from_page(page);
596 597
	if (!mctz)
		return;
598 599 600 601 602
	/*
	 * Necessary to update all ancestors when hierarchy is used.
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
603
		mz = mem_cgroup_page_nodeinfo(memcg, page);
604
		excess = soft_limit_excess(memcg);
605 606 607 608 609
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
610 611 612
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
613 614
			/* if on-tree, remove it */
			if (mz->on_tree)
615
				__mem_cgroup_remove_exceeded(mz, mctz);
616 617 618 619
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
620
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
621
			spin_unlock_irqrestore(&mctz->lock, flags);
622 623 624 625 626 627
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
628 629 630
	struct mem_cgroup_tree_per_node *mctz;
	struct mem_cgroup_per_node *mz;
	int nid;
631

632
	for_each_node(nid) {
633 634
		mz = mem_cgroup_nodeinfo(memcg, nid);
		mctz = soft_limit_tree_node(nid);
635 636
		if (mctz)
			mem_cgroup_remove_exceeded(mz, mctz);
637 638 639
	}
}

640 641
static struct mem_cgroup_per_node *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
642
{
643
	struct mem_cgroup_per_node *mz;
644 645 646

retry:
	mz = NULL;
647
	if (!mctz->rb_rightmost)
648 649
		goto done;		/* Nothing to reclaim from */

650 651
	mz = rb_entry(mctz->rb_rightmost,
		      struct mem_cgroup_per_node, tree_node);
652 653 654 655 656
	/*
	 * Remove the node now but someone else can add it back,
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
657
	__mem_cgroup_remove_exceeded(mz, mctz);
658
	if (!soft_limit_excess(mz->memcg) ||
	    !css_tryget(&mz->memcg->css))
660 661 662 663 664
		goto retry;
done:
	return mz;
}

665 666
static struct mem_cgroup_per_node *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
667
{
668
	struct mem_cgroup_per_node *mz;
669

670
	spin_lock_irq(&mctz->lock);
671
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
672
	spin_unlock_irq(&mctz->lock);
673 674 675
	return mz;
}

/**
 * __mod_memcg_state - update cgroup memory statistics
 * @memcg: the memory cgroup
 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
 * @val: delta to add to the counter, can be negative
 */
void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
{
	long x;

	if (mem_cgroup_disabled())
		return;

	x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);
	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
691 692
		struct mem_cgroup *mi;

693 694 695 696 697
		/*
		 * Batch local counters to keep them in sync with
		 * the hierarchical ones.
		 */
		__this_cpu_add(memcg->vmstats_local->stat[idx], x);
698 699
		for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
			atomic_long_add(x, &mi->vmstats[idx]);
700 701 702 703 704
		x = 0;
	}
	__this_cpu_write(memcg->vmstats_percpu->stat[idx], x);
}
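/*
 * In other words: per-cpu deltas accumulate locally and are only folded
 * into the atomic hierarchical counters once their magnitude exceeds
 * MEMCG_CHARGE_BATCH, which keeps atomics off the common path.
 */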

static struct mem_cgroup_per_node *
parent_nodeinfo(struct mem_cgroup_per_node *pn, int nid)
{
	struct mem_cgroup *parent;

	parent = parent_mem_cgroup(pn->memcg);
	if (!parent)
		return NULL;
	return mem_cgroup_nodeinfo(parent, nid);
}

/**
 * __mod_lruvec_state - update lruvec memory statistics
 * @lruvec: the lruvec
 * @idx: the stat item
 * @val: delta to add to the counter, can be negative
 *
 * The lruvec is the intersection of the NUMA node and a cgroup. This
 * function updates the all three counters that are affected by a
 * change of state at this level: per-node, per-cgroup, per-lruvec.
 */
void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			int val)
{
729
	pg_data_t *pgdat = lruvec_pgdat(lruvec);
730
	struct mem_cgroup_per_node *pn;
731
	struct mem_cgroup *memcg;
732 733 734
	long x;

	/* Update node */
735
	__mod_node_page_state(pgdat, idx, val);
736 737 738 739 740

	if (mem_cgroup_disabled())
		return;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
741
	memcg = pn->memcg;
742 743

	/* Update memcg */
744
	__mod_memcg_state(memcg, idx, val);
745

746 747 748
	/* Update lruvec */
	__this_cpu_add(pn->lruvec_stat_local->count[idx], val);

749 750
	x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
751 752 753 754
		struct mem_cgroup_per_node *pi;

		for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
			atomic_long_add(x, &pi->lruvec_stat[idx]);
755 756 757 758 759
		x = 0;
	}
	__this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
}

760 761
void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val)
{
762
	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
763 764 765 766
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
767
	memcg = mem_cgroup_from_obj(p);
768 769 770 771 772

	/* Untracked pages have no memcg, no lruvec. Update only the node */
	if (!memcg || memcg == root_mem_cgroup) {
		__mod_node_page_state(pgdat, idx, val);
	} else {
773
		lruvec = mem_cgroup_lruvec(memcg, pgdat);
774 775 776 777 778
		__mod_lruvec_state(lruvec, idx, val);
	}
	rcu_read_unlock();
}

void mod_memcg_obj_state(void *p, int idx, int val)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_obj(p);
	if (memcg)
		mod_memcg_state(memcg, idx, val);
	rcu_read_unlock();
}

/**
 * __count_memcg_events - account VM events in a cgroup
 * @memcg: the memory cgroup
 * @idx: the event item
 * @count: the number of events that occurred
 */
void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count)
{
	unsigned long x;

	if (mem_cgroup_disabled())
		return;

	x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]);
	if (unlikely(x > MEMCG_CHARGE_BATCH)) {
806 807
		struct mem_cgroup *mi;

808 809 810 811 812
		/*
		 * Batch local counters to keep them in sync with
		 * the hierarchical ones.
		 */
		__this_cpu_add(memcg->vmstats_local->events[idx], x);
813 814
		for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
			atomic_long_add(x, &mi->vmevents[idx]);
815 816 817 818 819
		x = 0;
	}
	__this_cpu_write(memcg->vmstats_percpu->events[idx], x);
}

820
static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
821
{
822
	return atomic_long_read(&memcg->vmevents[event]);
823 824
}

825 826
static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
{
827 828 829 830 831 832
	long x = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		x += per_cpu(memcg->vmstats_local->events[event], cpu);
	return x;
833 834
}

835
static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
836
					 struct page *page,
837
					 int nr_pages)
838
{
839 840
	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
841
		__count_memcg_events(memcg, PGPGIN, 1);
842
	else {
843
		__count_memcg_events(memcg, PGPGOUT, 1);
844 845
		nr_pages = -nr_pages; /* for event */
	}
846

847
	__this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
848 849
}

850 851
static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
852 853 854
{
	unsigned long val, next;

855 856
	val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
	next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
857
	/* from time_after() in jiffies.h */
858
	if ((long)(next - val) < 0) {
859 860 861 862
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
863 864 865
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
866 867 868
		default:
			break;
		}
869
		__this_cpu_write(memcg->vmstats_percpu->targets[target], next);
870
		return true;
871
	}
872
	return false;
873 874 875 876 877 878
}

/*
 * Check events in order.
 *
 */
879
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
880 881
{
	/* threshold event is triggered in finer grain than soft limit */
882 883
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
884
		bool do_softlimit;
885

886 887
		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
888
		mem_cgroup_threshold(memcg);
889 890
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
891
	}
892 893
}

894
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
895
{
896 897 898 899 900 901 902 903
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

904
	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
905
}
EXPORT_SYMBOL(mem_cgroup_from_task);
907

/**
 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
 * @mm: mm from which memcg should be extracted. It can be NULL.
 *
 * Obtain a reference on mm->memcg and returns it if successful. Otherwise
 * root_mem_cgroup is returned. However if mem_cgroup is disabled, NULL is
 * returned.
 */
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
917
{
918 919 920 921
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return NULL;
922

923 924
	rcu_read_lock();
	do {
925 926 927 928 929 930
		/*
		 * Page cache insertions can happen without an
		 * actual mm context, e.g. during disk probing
		 * on boot, loopback IO, acct() writes etc.
		 */
		if (unlikely(!mm))
931
			memcg = root_mem_cgroup;
932 933 934 935 936
		else {
			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
			if (unlikely(!memcg))
				memcg = root_mem_cgroup;
		}
937
	} while (!css_tryget(&memcg->css));
938
	rcu_read_unlock();
939
	return memcg;
940
}
941 942
EXPORT_SYMBOL(get_mem_cgroup_from_mm);

/**
 * get_mem_cgroup_from_page: Obtain a reference on given page's memcg.
 * @page: page from which memcg should be extracted.
 *
 * Obtain a reference on page->memcg and returns it if successful. Otherwise
 * root_mem_cgroup is returned.
 */
struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
{
	struct mem_cgroup *memcg = page->mem_cgroup;

	if (mem_cgroup_disabled())
		return NULL;

	rcu_read_lock();
	/* Page should not get uncharged and freed memcg under us. */
	if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css)))
960 961 962 963 964 965
		memcg = root_mem_cgroup;
	rcu_read_unlock();
	return memcg;
}
EXPORT_SYMBOL(get_mem_cgroup_from_page);

966 967 968 969 970 971
/**
 * If current->active_memcg is non-NULL, do not fallback to current->mm->memcg.
 */
static __always_inline struct mem_cgroup *get_mem_cgroup_from_current(void)
{
	if (unlikely(current->active_memcg)) {
		struct mem_cgroup *memcg;
973 974

		rcu_read_lock();
		/* current->active_memcg must hold a ref. */
		if (WARN_ON_ONCE(!css_tryget(&current->active_memcg->css)))
			memcg = root_mem_cgroup;
		else
979 980 981 982 983 984
			memcg = current->active_memcg;
		rcu_read_unlock();
		return memcg;
	}
	return get_mem_cgroup_from_mm(current->mm);
}
985

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
999
 * Reclaimers can specify a node and a priority level in @reclaim to
1000
 * divide up the memcgs in the hierarchy among all concurrent
1001
 * reclaimers operating on the same node and priority.
1002
 */
1003
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1004
				   struct mem_cgroup *prev,
1005
				   struct mem_cgroup_reclaim_cookie *reclaim)
K
M
1008
	struct cgroup_subsys_state *css = NULL;
1009
	struct mem_cgroup *memcg = NULL;
1010
	struct mem_cgroup *pos = NULL;
1011

1012 1013
	if (mem_cgroup_disabled())
		return NULL;
1014

1015 1016
	if (!root)
		root = root_mem_cgroup;
K
1018
	if (prev && !reclaim)
1019
		pos = prev;
K
1021 1022
	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
1023
			goto out;
1024
		return root;
1025
	}
K
1027
	rcu_read_lock();
M
1029
	if (reclaim) {
1030
		struct mem_cgroup_per_node *mz;
1031

1032
		mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
1033
		iter = &mz->iter;
1034 1035 1036 1037

		if (prev && reclaim->generation != iter->generation)
			goto out_unlock;

1038
		while (1) {
1039
			pos = READ_ONCE(iter->position);
1040 1041
			if (!pos || css_tryget(&pos->css))
				break;
1042
			/*
1043 1044 1045 1046 1047 1048
			 * css reference reached zero, so iter->position will
			 * be cleared by ->css_released. However, we should not
			 * rely on this happening soon, because ->css_released
			 * is called from a work queue, and by busy-waiting we
			 * might block it. So we clear iter->position right
			 * away.
1049
			 */
1050 1051
			(void)cmpxchg(&iter->position, pos, NULL);
		}
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
1069
		}

1071 1072 1073 1074 1075 1076
		/*
		 * Verify the css and acquire a reference.  The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		memcg = mem_cgroup_from_css(css);

1078 1079
		if (css == &root->css)
			break;

1081 1082
		if (css_tryget(css))
			break;
1083

1084
		memcg = NULL;
1085
	}
1086 1087 1088

	if (reclaim) {
		/*
1089 1090 1091
		 * The position could have already been updated by a competing
		 * thread, so check that the value hasn't changed since we read
		 * it to avoid reclaiming from the same cgroup twice.
1092
		 */
1093 1094
		(void)cmpxchg(&iter->position, pos, memcg);

1095 1096 1097 1098 1099 1100 1101
		if (pos)
			css_put(&pos->css);

		if (!memcg)
			iter->generation++;
		else if (!prev)
			reclaim->generation = iter->generation;
1102
	}
1103

1104 1105
out_unlock:
	rcu_read_unlock();
1106
out:
1107 1108 1109
	if (prev && prev != root)
		css_put(&prev->css);

1110
	return memcg;
}

1113 1114 1115 1116 1117 1118 1119
/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
1120 1121 1122 1123 1124 1125
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

1127 1128
static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
					struct mem_cgroup *dead_memcg)
1129 1130
{
	struct mem_cgroup_reclaim_iter *iter;
1131 1132
	struct mem_cgroup_per_node *mz;
	int nid;
1133

1134 1135
	for_each_node(nid) {
		mz = mem_cgroup_nodeinfo(from, nid);
1136 1137
		iter = &mz->iter;
		cmpxchg(&iter->position, dead_memcg, NULL);
1138 1139 1140
	}
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup *memcg = dead_memcg;
	struct mem_cgroup *last;

	do {
		__invalidate_reclaim_iterators(memcg, dead_memcg);
		last = memcg;
	} while ((memcg = parent_mem_cgroup(memcg)));

	/*
	 * When cgroup1 non-hierarchy mode is used,
	 * parent_mem_cgroup() does not walk all the way up to the
	 * cgroup root (root_mem_cgroup). So we have to handle
	 * dead_memcg from cgroup root separately.
	 */
	if (last != root_mem_cgroup)
		__invalidate_reclaim_iterators(root_mem_cgroup,
						dead_memcg);
}

/**
 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
 * @memcg: hierarchy root
 * @fn: function to call for each task
 * @arg: argument passed to @fn
 *
 * This function iterates over tasks attached to @memcg or to any of its
 * descendants and calls @fn for each task. If @fn returns a non-zero
 * value, the function breaks the iteration loop and returns the value.
 * Otherwise, it will iterate over all tasks and return 0.
 *
 * This function must not be called for the root memory cgroup.
 */
int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			  int (*fn)(struct task_struct *, void *), void *arg)
{
	struct mem_cgroup *iter;
	int ret = 0;

	BUG_ON(memcg == root_mem_cgroup);

	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

1187
		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
		while (!ret && (task = css_task_iter_next(&it)))
			ret = fn(task, arg);
		css_task_iter_end(&it);
		if (ret) {
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
	}
	return ret;
}

1199
/**
1200
 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
1201
 * @page: the page
1202
 * @pgdat: pgdat of the page
1203 1204 1205 1206
 *
 * This function is only safe when following the LRU page isolation
 * and putback protocol: the LRU lock must be held, and the page must
 * either be PageLRU() or the caller must have isolated/allocated it.
1207
 */
struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat)
{
1210
	struct mem_cgroup_per_node *mz;
1211
	struct mem_cgroup *memcg;
1212
	struct lruvec *lruvec;
1213

1214
	if (mem_cgroup_disabled()) {
1215
		lruvec = &pgdat->__lruvec;
1216 1217
		goto out;
	}
1218

1219
	memcg = page->mem_cgroup;
1220
	/*
1221
	 * Swapcache readahead pages are added to the LRU - and
1222
	 * possibly migrated - before they are charged.
1223
	 */
1224 1225
	if (!memcg)
		memcg = root_mem_cgroup;
1226

1227
	mz = mem_cgroup_page_nodeinfo(memcg, page);
1228 1229 1230 1231 1232 1233 1234
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->zone here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
1237
	return lruvec;
}
1239

1240
/**
1241 1242 1243
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
1244
 * @zid: zone id of the accounted pages
1245
 * @nr_pages: positive when adding or negative when removing
1246
 *
1247 1248 1249
 * This function must be called under lru_lock, just before a page is added
 * to or just after a page is removed from an lru list (that ordering being
 * so as to allow it to check that lru_size 0 is consistent with list_empty).
1250
 */
1251
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1252
				int zid, int nr_pages)
1253
{
1254
	struct mem_cgroup_per_node *mz;
1255
	unsigned long *lru_size;
1256
	long size;
1257 1258 1259 1260

	if (mem_cgroup_disabled())
		return;

1261
	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1262
	lru_size = &mz->lru_zone_size[zid][lru];
1263 1264 1265 1266 1267

	if (nr_pages < 0)
		*lru_size += nr_pages;

	size = *lru_size;
1268 1269 1270
	if (WARN_ONCE(size < 0,
		"%s(%p, %d, %d): lru_size %ld\n",
		__func__, lruvec, lru, nr_pages, size)) {
1271 1272 1273 1274 1275 1276
		VM_BUG_ON(1);
		*lru_size = 0;
	}

	if (nr_pages > 0)
		*lru_size += nr_pages;
}
1278

1279
/**
1280
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
1282
 *
1283
 * Returns the maximum amount of memory @mem can be charged with, in
1284
 * pages.
1285
 */
1286
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1287
{
1288 1289 1290
	unsigned long margin = 0;
	unsigned long count;
	unsigned long limit;
1291

1292
	count = page_counter_read(&memcg->memory);
1293
	limit = READ_ONCE(memcg->memory.max);
1294 1295 1296
	if (count < limit)
		margin = limit - count;

1297
	if (do_memsw_account()) {
1298
		count = page_counter_read(&memcg->memsw);
1299
		limit = READ_ONCE(memcg->memsw.max);
1300
		if (count < limit)
1301
			margin = min(margin, limit - count);
1302 1303
		else
			margin = 0;
1304 1305 1306
	}

	return margin;
1307 1308
}
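/*
 * Worked example: with memory.max = 1000 pages and usage = 900, and (when
 * memsw accounting is active) memsw.max = 1100 with memsw usage = 1050,
 * the margin is min(1000 - 900, 1100 - 1050) = 50 pages.
 */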

1309
/*
 * A routine for checking whether "mem" is under move_account() or not.
1311
 *
 * Checking a cgroup is mc.from or mc.to or under hierarchy of
 * moving cgroups. This is for waiting at high-memory pressure
 * caused by "move".
1315
 */
1316
static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1317
{
1318 1319
	struct mem_cgroup *from;
	struct mem_cgroup *to;
1320
	bool ret = false;
	/*
	 * Unlike task_move routines, we access mc.to, mc.from not under
	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;
1330

1331 1332
	ret = mem_cgroup_is_descendant(from, memcg) ||
		mem_cgroup_is_descendant(to, memcg);
1333 1334
unlock:
	spin_unlock(&mc.lock);
1335 1336 1337
	return ret;
}

1338
static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1339 1340
{
	if (mc.moving_task && current != mc.moving_task) {
1341
		if (mem_cgroup_under_move(memcg)) {
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}

1354 1355 1356 1357
static char *memory_stat_format(struct mem_cgroup *memcg)
{
	struct seq_buf s;
	int i;
1358

	seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
	if (!s.buffer)
		return NULL;

	/*
	 * Provide statistics on the state of the memory subsystem as
	 * well as cumulative event counters that show past behavior.
	 *
	 * This list is ordered following a combination of these gradients:
	 * 1) generic big picture -> specifics and details
	 * 2) reflecting userspace activity -> reflecting kernel heuristics
	 *
	 * Current memory state:
	 */

	seq_buf_printf(&s, "anon %llu\n",
1375
		       (u64)memcg_page_state(memcg, NR_ANON_MAPPED) *
1376 1377
		       PAGE_SIZE);
	seq_buf_printf(&s, "file %llu\n",
1378
		       (u64)memcg_page_state(memcg, NR_FILE_PAGES) *
		       PAGE_SIZE);
	seq_buf_printf(&s, "kernel_stack %llu\n",
		       (u64)memcg_page_state(memcg, MEMCG_KERNEL_STACK_KB) *
		       1024);
	seq_buf_printf(&s, "slab %llu\n",
		       (u64)(memcg_page_state(memcg, NR_SLAB_RECLAIMABLE) +
			     memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE)) *
		       PAGE_SIZE);
	seq_buf_printf(&s, "sock %llu\n",
		       (u64)memcg_page_state(memcg, MEMCG_SOCK) *
		       PAGE_SIZE);

	seq_buf_printf(&s, "shmem %llu\n",
		       (u64)memcg_page_state(memcg, NR_SHMEM) *
		       PAGE_SIZE);
	seq_buf_printf(&s, "file_mapped %llu\n",
		       (u64)memcg_page_state(memcg, NR_FILE_MAPPED) *
		       PAGE_SIZE);
	seq_buf_printf(&s, "file_dirty %llu\n",
		       (u64)memcg_page_state(memcg, NR_FILE_DIRTY) *
		       PAGE_SIZE);
	seq_buf_printf(&s, "file_writeback %llu\n",
		       (u64)memcg_page_state(memcg, NR_WRITEBACK) *
		       PAGE_SIZE);

1404
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1405
	seq_buf_printf(&s, "anon_thp %llu\n",
1406 1407 1408
		       (u64)memcg_page_state(memcg, NR_ANON_THPS) *
		       HPAGE_PMD_SIZE);
#endif
1409 1410

	for (i = 0; i < NR_LRU_LISTS; i++)
1411
		seq_buf_printf(&s, "%s %llu\n", lru_list_name(i),
			       (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
			       PAGE_SIZE);

	seq_buf_printf(&s, "slab_reclaimable %llu\n",
		       (u64)memcg_page_state(memcg, NR_SLAB_RECLAIMABLE) *
		       PAGE_SIZE);
	seq_buf_printf(&s, "slab_unreclaimable %llu\n",
		       (u64)memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE) *
		       PAGE_SIZE);

	/* Accumulated memory events */

1424 1425 1426 1427
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGFAULT),
		       memcg_events(memcg, PGFAULT));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGMAJFAULT),
		       memcg_events(memcg, PGMAJFAULT));
1428 1429 1430 1431 1432

	seq_buf_printf(&s, "workingset_refault %lu\n",
		       memcg_page_state(memcg, WORKINGSET_REFAULT));
	seq_buf_printf(&s, "workingset_activate %lu\n",
		       memcg_page_state(memcg, WORKINGSET_ACTIVATE));
1433 1434
	seq_buf_printf(&s, "workingset_restore %lu\n",
		       memcg_page_state(memcg, WORKINGSET_RESTORE));
1435 1436 1437
	seq_buf_printf(&s, "workingset_nodereclaim %lu\n",
		       memcg_page_state(memcg, WORKINGSET_NODERECLAIM));

1438 1439
	seq_buf_printf(&s, "%s %lu\n",  vm_event_name(PGREFILL),
		       memcg_events(memcg, PGREFILL));
1440 1441 1442 1443 1444 1445
	seq_buf_printf(&s, "pgscan %lu\n",
		       memcg_events(memcg, PGSCAN_KSWAPD) +
		       memcg_events(memcg, PGSCAN_DIRECT));
	seq_buf_printf(&s, "pgsteal %lu\n",
		       memcg_events(memcg, PGSTEAL_KSWAPD) +
		       memcg_events(memcg, PGSTEAL_DIRECT));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGACTIVATE),
		       memcg_events(memcg, PGACTIVATE));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGDEACTIVATE),
		       memcg_events(memcg, PGDEACTIVATE));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREE),
		       memcg_events(memcg, PGLAZYFREE));
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREED),
		       memcg_events(memcg, PGLAZYFREED));
1454 1455

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1456
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_FAULT_ALLOC),
1457
		       memcg_events(memcg, THP_FAULT_ALLOC));
1458
	seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_COLLAPSE_ALLOC),
		       memcg_events(memcg, THP_COLLAPSE_ALLOC));
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

	/* The above should easily fit into one page */
	WARN_ON_ONCE(seq_buf_has_overflowed(&s));

	return s.buffer;
}
1467

1468
#define K(x) ((x) << (PAGE_SHIFT-10))
1469
/**
1470 1471
 * mem_cgroup_print_oom_context: Print OOM information relevant to
 * memory controller.
1472 1473 1474 1475 1476 1477
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
1478
void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1479 1480 1481
{
	rcu_read_lock();

1482 1483 1484 1485 1486
	if (memcg) {
		pr_cont(",oom_memcg=");
		pr_cont_cgroup_path(memcg->css.cgroup);
	} else
		pr_cont(",global_oom");
1487
	if (p) {
1488
		pr_cont(",task_memcg=");
1489 1490
		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
	}
1491
	rcu_read_unlock();
}

/**
 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
 * memory controller.
 * @memcg: The memory cgroup that went over limit
 */
void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
1501
	char *buf;
1502

1503 1504
	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memory)),
1505
		K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
1506 1507 1508
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->swap)),
1509
			K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
	else {
		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->memsw)),
			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->kmem)),
			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1517
	}

	pr_info("Memory cgroup stats for ");
	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont(":");
	buf = memory_stat_format(memcg);
	if (!buf)
		return;
	pr_info("%s", buf);
	kfree(buf);
1527 1528
}

D
 * Return the memory (and swap, if configured) limit for a memcg.
 */
1532
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
D
1534
	unsigned long max;
1535

1536
	max = READ_ONCE(memcg->memory.max);
1537
	if (mem_cgroup_swappiness(memcg)) {
1538 1539
		unsigned long memsw_max;
		unsigned long swap_max;
1540

1541
		memsw_max = memcg->memsw.max;
1542
		swap_max = READ_ONCE(memcg->swap.max);
1543 1544
		swap_max = min(swap_max, (unsigned long)total_swap_pages);
		max = min(max + swap_max, memsw_max);
1545
	}
1546
	return max;
D

1549 1550 1551 1552 1553
unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return page_counter_read(&memcg->memory);
}

1554
static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1555
				     int order)
1556
{
1557 1558 1559
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
1560
		.memcg = memcg,
1561 1562 1563
		.gfp_mask = gfp_mask,
		.order = order,
	};
1564
	bool ret;
1565

	if (mutex_lock_killable(&oom_lock))
		return true;
	/*
	 * A few threads which were not waiting at mutex_lock_killable() can
	 * fail to bail out. Therefore, check again after holding oom_lock.
	 */
	ret = should_force_charge() || out_of_memory(&oc);
1573
	mutex_unlock(&oom_lock);
1574
	return ret;
1575 1576
}

1577
static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1578
				   pg_data_t *pgdat,
				   gfp_t gfp_mask,
				   unsigned long *total_scanned)
{
	struct mem_cgroup *victim = NULL;
	int total = 0;
	int loop = 0;
	unsigned long excess;
	unsigned long nr_scanned;
	struct mem_cgroup_reclaim_cookie reclaim = {
1588
		.pgdat = pgdat,
1589 1590
	};

1591
	excess = soft_limit_excess(root_memcg);

	while (1) {
		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
		if (!victim) {
			loop++;
			if (loop >= 2) {
				/*
				 * If we have not been able to reclaim
				 * anything, it might be because there are
				 * no reclaimable pages under this hierarchy
				 */
				if (!total)
					break;
				/*
				 * We want to do more targeted reclaim.
				 * excess >> 2 is not too excessive, so we do not
				 * reclaim too much, nor too little such that we keep
				 * coming back to reclaim from this cgroup
				 */
				if (total >= (excess >> 2) ||
					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
					break;
			}
			continue;
		}
1617
		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1618
					pgdat, &nr_scanned);
1619
		*total_scanned += nr_scanned;
1620
		if (!soft_limit_excess(root_memcg))
1621
			break;
1622
	}
1623 1624
	mem_cgroup_iter_break(root_memcg, victim);
	return total;
1625 1626
}

1627 1628 1629 1630 1631 1632
#ifdef CONFIG_LOCKDEP
static struct lockdep_map memcg_oom_lock_dep_map = {
	.name = "memcg_oom_lock",
};
#endif

1633 1634
static DEFINE_SPINLOCK(memcg_oom_lock);

/*
 * Check whether the OOM killer is already running under our hierarchy.
 * If someone is running, return false.
 */
1639
static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
{
1641
	struct mem_cgroup *iter, *failed = NULL;
1642

1643 1644
	spin_lock(&memcg_oom_lock);

1645
	for_each_mem_cgroup_tree(iter, memcg) {
1646
		if (iter->oom_lock) {
1647 1648 1649 1650 1651
			/*
			 * this subtree of our hierarchy is already locked
			 * so we cannot give a lock.
			 */
			failed = iter;
1652 1653
			mem_cgroup_iter_break(memcg, iter);
			break;
1654 1655
		} else
			iter->oom_lock = true;
	}

	if (failed) {
		/*
		 * OK, we failed to lock the whole subtree so we have
		 * to clean up what we set up to the failing subtree
		 */
		for_each_mem_cgroup_tree(iter, memcg) {
			if (iter == failed) {
				mem_cgroup_iter_break(memcg, iter);
				break;
			}
			iter->oom_lock = false;
1669
		}
1670 1671
	} else
		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1672 1673 1674 1675

	spin_unlock(&memcg_oom_lock);

	return !failed;
1676
}
1677

1678
static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1679
{
	struct mem_cgroup *iter;

1682
	spin_lock(&memcg_oom_lock);
1683
	mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
1684
	for_each_mem_cgroup_tree(iter, memcg)
1685
		iter->oom_lock = false;
1686
	spin_unlock(&memcg_oom_lock);
1687 1688
}

1689
static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1690 1691 1692
{
	struct mem_cgroup *iter;

1693
	spin_lock(&memcg_oom_lock);
1694
	for_each_mem_cgroup_tree(iter, memcg)
1695 1696
		iter->under_oom++;
	spin_unlock(&memcg_oom_lock);
1697 1698
}

1699
static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1700 1701 1702
{
	struct mem_cgroup *iter;

	/*
	 * When a new child is created while the hierarchy is under oom,
1705
	 * mem_cgroup_oom_lock() may not be called. Watch for underflow.
	 */
1707
	spin_lock(&memcg_oom_lock);
1708
	for_each_mem_cgroup_tree(iter, memcg)
1709 1710 1711
		if (iter->under_oom > 0)
			iter->under_oom--;
	spin_unlock(&memcg_oom_lock);
1712 1713
}

static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);

struct oom_wait_info {
1717
	struct mem_cgroup *memcg;
1718
	wait_queue_entry_t	wait;
};

1721
static int memcg_oom_wake_function(wait_queue_entry_t *wait,
	unsigned mode, int sync, void *arg)
{
1724 1725
	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
	struct mem_cgroup *oom_wait_memcg;
	struct oom_wait_info *oom_wait_info;

	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1729
	oom_wait_memcg = oom_wait_info->memcg;

1731 1732
	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
		return 0;
	return autoremove_wake_function(wait, mode, sync, arg);
}

1737
static void memcg_oom_recover(struct mem_cgroup *memcg)
1738
{
	/*
	 * For the following lockless ->under_oom test, the only required
	 * guarantee is that it must see the state asserted by an OOM when
	 * this function is called as a result of userland actions
	 * triggered by the notification of the OOM.  This is trivially
	 * achieved by invoking mem_cgroup_mark_under_oom() before
	 * triggering notification.
	 */
	if (memcg && memcg->under_oom)
1748
		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1749 1750
}

enum oom_status {
	OOM_SUCCESS,
	OOM_FAILED,
	OOM_ASYNC,
	OOM_SKIPPED
};

static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1759
{
1760 1761 1762
	enum oom_status ret;
	bool locked;

1763 1764 1765
	if (order > PAGE_ALLOC_COSTLY_ORDER)
		return OOM_SKIPPED;

1766 1767
	memcg_memory_event(memcg, MEMCG_OOM);

	/*
1769 1770 1771 1772
	 * We are in the middle of the charge context here, so we
	 * don't want to block when potentially sitting on a callstack
	 * that holds all kinds of filesystem and mm locks.
	 *
1773 1774 1775 1776
	 * cgroup1 allows disabling the OOM killer and waiting for outside
	 * handling until the charge can succeed; remember the context and put
	 * the task to sleep at the end of the page fault when all locks are
	 * released.
1777
	 *
	 * On the other hand, in-kernel OOM killer allows for an async victim
	 * memory reclaim (oom_reaper) and that means that we are not solely
	 * relying on the oom victim to make a forward progress and we can
	 * invoke the oom killer here.
	 *
	 * Please note that mem_cgroup_out_of_memory might fail to find a
	 * victim and then we have to bail out from the charge path.
K
KAMEZAWA Hiroyuki 已提交
1785
	 */
1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796
	if (memcg->oom_kill_disable) {
		if (!current->in_user_fault)
			return OOM_SKIPPED;
		css_get(&memcg->css);
		current->memcg_in_oom = memcg;
		current->memcg_oom_gfp_mask = mask;
		current->memcg_oom_order = order;

		return OOM_ASYNC;
	}

1797 1798 1799 1800 1801 1802 1803 1804
	mem_cgroup_mark_under_oom(memcg);

	locked = mem_cgroup_oom_trylock(memcg);

	if (locked)
		mem_cgroup_oom_notify(memcg);

	mem_cgroup_unmark_under_oom(memcg);
1805
	if (mem_cgroup_out_of_memory(memcg, mask, order))
1806 1807 1808 1809 1810 1811
		ret = OOM_SUCCESS;
	else
		ret = OOM_FAILED;

	if (locked)
		mem_cgroup_oom_unlock(memcg);
1812

1813
	return ret;
1814 1815 1816 1817
}

/**
 * mem_cgroup_oom_synchronize - complete memcg OOM handling
1818
 * @handle: actually kill/wait or just clean up the OOM state
1819
 *
1820 1821
 * This has to be called at the end of a page fault if the memcg OOM
 * handler was enabled.
1822
 *
1823
 * Memcg supports userspace OOM handling where failed allocations must
1824 1825 1826 1827
 * sleep on a waitqueue until the userspace task resolves the
 * situation.  Sleeping directly in the charge context with all kinds
 * of locks held is not a good idea, instead we remember an OOM state
 * in the task and mem_cgroup_oom_synchronize() has to be called at
1828
 * the end of the page fault to complete the OOM handling.
1829 1830
 *
 * Returns %true if an ongoing memcg OOM situation was detected and
1831
 * completed, %false otherwise.
1832
 */
1833
bool mem_cgroup_oom_synchronize(bool handle)
1834
{
T
Tejun Heo 已提交
1835
	struct mem_cgroup *memcg = current->memcg_in_oom;
1836
	struct oom_wait_info owait;
1837
	bool locked;
1838 1839 1840

	/* OOM is global, do not handle */
	if (!memcg)
1841
		return false;
1842

1843
	if (!handle)
1844
		goto cleanup;
1845 1846 1847 1848 1849

	owait.memcg = memcg;
	owait.wait.flags = 0;
	owait.wait.func = memcg_oom_wake_function;
	owait.wait.private = current;
1850
	INIT_LIST_HEAD(&owait.wait.entry);
K
KAMEZAWA Hiroyuki 已提交
1851

1852
	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1853 1854 1855 1856 1857 1858 1859 1860 1861 1862
	mem_cgroup_mark_under_oom(memcg);

	locked = mem_cgroup_oom_trylock(memcg);

	if (locked)
		mem_cgroup_oom_notify(memcg);

	if (locked && !memcg->oom_kill_disable) {
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
T
Tejun Heo 已提交
1863 1864
		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
					 current->memcg_oom_order);
1865
	} else {
1866
		schedule();
1867 1868 1869 1870 1871
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
	}

	if (locked) {
1872 1873 1874 1875 1876 1877 1878 1879
		mem_cgroup_oom_unlock(memcg);
		/*
		 * There is no guarantee that an OOM-lock contender
		 * sees the wakeups triggered by the OOM kill
		 * uncharges.  Wake any sleepers explicitely.
		 */
		memcg_oom_recover(memcg);
	}
1880
cleanup:
T
Tejun Heo 已提交
1881
	current->memcg_in_oom = NULL;
1882
	css_put(&memcg->css);
K
KAMEZAWA Hiroyuki 已提交
1883
	return true;
1884 1885
}

1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913
/**
 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
 * @victim: task to be killed by the OOM killer
 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
 *
 * Returns a pointer to a memory cgroup, which has to be cleaned up
 * by killing all belonging OOM-killable tasks.
 *
 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
 */
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain)
{
	struct mem_cgroup *oom_group = NULL;
	struct mem_cgroup *memcg;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return NULL;

	if (!oom_domain)
		oom_domain = root_mem_cgroup;

	rcu_read_lock();

	memcg = mem_cgroup_from_task(victim);
	if (memcg == root_mem_cgroup)
		goto out;

1914 1915 1916 1917 1918 1919 1920 1921
	/*
	 * If the victim task has been asynchronously moved to a different
	 * memory cgroup, we might end up killing tasks outside oom_domain.
	 * In this case it's better to ignore memory.group.oom.
	 */
	if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
		goto out;

1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946 1947 1948 1949
	/*
	 * Traverse the memory cgroup hierarchy from the victim task's
	 * cgroup up to the OOMing cgroup (or root) to find the
	 * highest-level memory cgroup with oom.group set.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		if (memcg->oom_group)
			oom_group = memcg;

		if (memcg == oom_domain)
			break;
	}

	if (oom_group)
		css_get(&oom_group->css);
out:
	rcu_read_unlock();

	return oom_group;
}

void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
	pr_info("Tasks in ");
	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont(" are going to be killed due to memory.oom.group set\n");
}

1950
/**
1951 1952
 * lock_page_memcg - lock a page->mem_cgroup binding
 * @page: the page
1953
 *
1954
 * This function protects unlocked LRU pages from being moved to
1955 1956 1957 1958 1959
 * another cgroup.
 *
 * It ensures lifetime of the returned memcg. Caller is responsible
 * for the lifetime of the page; __unlock_page_memcg() is available
 * when @page might get freed inside the locked section.
1960
 */
1961
struct mem_cgroup *lock_page_memcg(struct page *page)
1962
{
1963
	struct page *head = compound_head(page); /* rmap on tail pages */
1964
	struct mem_cgroup *memcg;
1965
	unsigned long flags;
1966

1967 1968 1969 1970
	/*
	 * The RCU lock is held throughout the transaction.  The fast
	 * path can get away without acquiring the memcg->move_lock
	 * because page moving starts with an RCU grace period.
1971 1972 1973 1974 1975 1976 1977
	 *
	 * The RCU lock also protects the memcg from being freed when
	 * the page state that is going to change is the only thing
	 * preventing the page itself from being freed. E.g. writeback
	 * doesn't hold a page reference and relies on PG_writeback to
	 * keep off truncation, migration and so forth.
         */
1978 1979 1980
	rcu_read_lock();

	if (mem_cgroup_disabled())
1981
		return NULL;
1982
again:
1983
	memcg = head->mem_cgroup;
1984
	if (unlikely(!memcg))
1985
		return NULL;
1986

Q
Qiang Huang 已提交
1987
	if (atomic_read(&memcg->moving_account) <= 0)
1988
		return memcg;
1989

1990
	spin_lock_irqsave(&memcg->move_lock, flags);
1991
	if (memcg != head->mem_cgroup) {
1992
		spin_unlock_irqrestore(&memcg->move_lock, flags);
1993 1994
		goto again;
	}
1995 1996 1997 1998

	/*
	 * When charge migration first begins, we can have locked and
	 * unlocked page stat updates happening concurrently.  Track
1999
	 * the task who has the lock for unlock_page_memcg().
2000 2001 2002
	 */
	memcg->move_lock_task = current;
	memcg->move_lock_flags = flags;
2003

2004
	return memcg;
2005
}
2006
EXPORT_SYMBOL(lock_page_memcg);
2007

2008
/**
2009 2010 2011 2012
 * __unlock_page_memcg - unlock and unpin a memcg
 * @memcg: the memcg
 *
 * Unlock and unpin a memcg returned by lock_page_memcg().
2013
 */
2014
void __unlock_page_memcg(struct mem_cgroup *memcg)
2015
{
2016 2017 2018 2019 2020 2021 2022 2023
	if (memcg && memcg->move_lock_task == current) {
		unsigned long flags = memcg->move_lock_flags;

		memcg->move_lock_task = NULL;
		memcg->move_lock_flags = 0;

		spin_unlock_irqrestore(&memcg->move_lock, flags);
	}
2024

2025
	rcu_read_unlock();
2026
}
2027 2028 2029 2030 2031 2032 2033

/**
 * unlock_page_memcg - unlock a page->mem_cgroup binding
 * @page: the page
 */
void unlock_page_memcg(struct page *page)
{
2034 2035 2036
	struct page *head = compound_head(page);

	__unlock_page_memcg(head->mem_cgroup);
2037
}
2038
EXPORT_SYMBOL(unlock_page_memcg);
2039

2040 2041
struct memcg_stock_pcp {
	struct mem_cgroup *cached; /* this never be root cgroup */
2042
	unsigned int nr_pages;
2043
	struct work_struct work;
2044
	unsigned long flags;
2045
#define FLUSHING_CACHED_CHARGE	0
2046 2047
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
2048
static DEFINE_MUTEX(percpu_charge_mutex);
2049

2050 2051 2052 2053 2054 2055 2056 2057 2058 2059
/**
 * consume_stock: Try to consume stocked charge on this cpu.
 * @memcg: memcg to consume from.
 * @nr_pages: how many pages to charge.
 *
 * The charges will only happen if @memcg matches the current cpu's memcg
 * stock, and at least @nr_pages are available in that stock.  Failure to
 * service an allocation will refill the stock.
 *
 * returns true if successful, false otherwise.
2060
 */
2061
static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2062 2063
{
	struct memcg_stock_pcp *stock;
2064
	unsigned long flags;
2065
	bool ret = false;
2066

2067
	if (nr_pages > MEMCG_CHARGE_BATCH)
2068
		return ret;
2069

2070 2071 2072
	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
2073
	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
2074
		stock->nr_pages -= nr_pages;
2075 2076
		ret = true;
	}
2077 2078 2079

	local_irq_restore(flags);

2080 2081 2082 2083
	return ret;
}

/*
2084
 * Returns stocks cached in percpu and reset cached information.
2085 2086 2087 2088 2089
 */
static void drain_stock(struct memcg_stock_pcp *stock)
{
	struct mem_cgroup *old = stock->cached;

2090
	if (stock->nr_pages) {
2091
		page_counter_uncharge(&old->memory, stock->nr_pages);
2092
		if (do_memsw_account())
2093
			page_counter_uncharge(&old->memsw, stock->nr_pages);
2094
		css_put_many(&old->css, stock->nr_pages);
2095
		stock->nr_pages = 0;
2096 2097 2098 2099 2100 2101
	}
	stock->cached = NULL;
}

static void drain_local_stock(struct work_struct *dummy)
{
2102 2103 2104
	struct memcg_stock_pcp *stock;
	unsigned long flags;

2105 2106 2107 2108
	/*
	 * The only protection from memory hotplug vs. drain_stock races is
	 * that we always operate on local CPU stock here with IRQ disabled
	 */
2109 2110 2111
	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
2112
	drain_stock(stock);
2113
	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2114 2115

	local_irq_restore(flags);
2116 2117 2118
}

/*
2119
 * Cache charges(val) to local per_cpu area.
2120
 * This will be consumed by consume_stock() function, later.
2121
 */
2122
static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2123
{
2124 2125 2126 2127
	struct memcg_stock_pcp *stock;
	unsigned long flags;

	local_irq_save(flags);
2128

2129
	stock = this_cpu_ptr(&memcg_stock);
2130
	if (stock->cached != memcg) { /* reset if necessary */
2131
		drain_stock(stock);
2132
		stock->cached = memcg;
2133
	}
2134
	stock->nr_pages += nr_pages;
2135

2136
	if (stock->nr_pages > MEMCG_CHARGE_BATCH)
2137 2138
		drain_stock(stock);

2139
	local_irq_restore(flags);
2140 2141 2142
}

/*
2143
 * Drains all per-CPU charge caches for given root_memcg resp. subtree
2144
 * of the hierarchy under it.
2145
 */
2146
static void drain_all_stock(struct mem_cgroup *root_memcg)
2147
{
2148
	int cpu, curcpu;
2149

2150 2151 2152
	/* If someone's already draining, avoid adding running more workers. */
	if (!mutex_trylock(&percpu_charge_mutex))
		return;
2153 2154 2155 2156 2157 2158
	/*
	 * Notify other cpus that system-wide "drain" is running
	 * We do not care about races with the cpu hotplug because cpu down
	 * as well as workers from this path always operate on the local
	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
	 */
2159
	curcpu = get_cpu();
2160 2161
	for_each_online_cpu(cpu) {
		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2162
		struct mem_cgroup *memcg;
2163
		bool flush = false;
2164

2165
		rcu_read_lock();
2166
		memcg = stock->cached;
2167 2168 2169 2170 2171 2172 2173
		if (memcg && stock->nr_pages &&
		    mem_cgroup_is_descendant(memcg, root_memcg))
			flush = true;
		rcu_read_unlock();

		if (flush &&
		    !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2174 2175 2176 2177 2178
			if (cpu == curcpu)
				drain_local_stock(&stock->work);
			else
				schedule_work_on(cpu, &stock->work);
		}
2179
	}
2180
	put_cpu();
2181
	mutex_unlock(&percpu_charge_mutex);
2182 2183
}

2184
static int memcg_hotplug_cpu_dead(unsigned int cpu)
2185 2186
{
	struct memcg_stock_pcp *stock;
2187
	struct mem_cgroup *memcg, *mi;
2188 2189 2190

	stock = &per_cpu(memcg_stock, cpu);
	drain_stock(stock);
2191 2192 2193 2194 2195 2196 2197 2198

	for_each_mem_cgroup(memcg) {
		int i;

		for (i = 0; i < MEMCG_NR_STAT; i++) {
			int nid;
			long x;

2199
			x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0);
2200
			if (x)
2201 2202
				for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
					atomic_long_add(x, &memcg->vmstats[i]);
2203 2204 2205 2206 2207 2208 2209 2210 2211

			if (i >= NR_VM_NODE_STAT_ITEMS)
				continue;

			for_each_node(nid) {
				struct mem_cgroup_per_node *pn;

				pn = mem_cgroup_nodeinfo(memcg, nid);
				x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0);
2212
				if (x)
2213 2214 2215
					do {
						atomic_long_add(x, &pn->lruvec_stat[i]);
					} while ((pn = parent_nodeinfo(pn, nid)));
2216 2217 2218
			}
		}

2219
		for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
2220 2221
			long x;

2222
			x = this_cpu_xchg(memcg->vmstats_percpu->events[i], 0);
2223
			if (x)
2224 2225
				for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
					atomic_long_add(x, &memcg->vmevents[i]);
2226 2227 2228
		}
	}

2229
	return 0;
2230 2231
}

2232 2233 2234 2235 2236
static void reclaim_high(struct mem_cgroup *memcg,
			 unsigned int nr_pages,
			 gfp_t gfp_mask)
{
	do {
2237 2238
		if (page_counter_read(&memcg->memory) <=
		    READ_ONCE(memcg->memory.high))
2239
			continue;
2240
		memcg_memory_event(memcg, MEMCG_HIGH);
2241
		try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
2242 2243
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));
2244 2245 2246 2247 2248 2249 2250
}

static void high_work_func(struct work_struct *work)
{
	struct mem_cgroup *memcg;

	memcg = container_of(work, struct mem_cgroup, high_work);
2251
	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2252 2253
}

2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306
/*
 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
 * enough to still cause a significant slowdown in most cases, while still
 * allowing diagnostics and tracing to proceed without becoming stuck.
 */
#define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)

/*
 * When calculating the delay, we use these either side of the exponentiation to
 * maintain precision and scale to a reasonable number of jiffies (see the table
 * below.
 *
 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
 *   overage ratio to a delay.
 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down down the
 *   proposed penalty in order to reduce to a reasonable number of jiffies, and
 *   to produce a reasonable delay curve.
 *
 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
 * reasonable delay curve compared to precision-adjusted overage, not
 * penalising heavily at first, but still making sure that growth beyond the
 * limit penalises misbehaviour cgroups by slowing them down exponentially. For
 * example, with a high of 100 megabytes:
 *
 *  +-------+------------------------+
 *  | usage | time to allocate in ms |
 *  +-------+------------------------+
 *  | 100M  |                      0 |
 *  | 101M  |                      6 |
 *  | 102M  |                     25 |
 *  | 103M  |                     57 |
 *  | 104M  |                    102 |
 *  | 105M  |                    159 |
 *  | 106M  |                    230 |
 *  | 107M  |                    313 |
 *  | 108M  |                    409 |
 *  | 109M  |                    518 |
 *  | 110M  |                    639 |
 *  | 111M  |                    774 |
 *  | 112M  |                    921 |
 *  | 113M  |                   1081 |
 *  | 114M  |                   1254 |
 *  | 115M  |                   1439 |
 *  | 116M  |                   1638 |
 *  | 117M  |                   1849 |
 *  | 118M  |                   2000 |
 *  | 119M  |                   2000 |
 *  | 120M  |                   2000 |
 *  +-------+------------------------+
 */
 #define MEMCG_DELAY_PRECISION_SHIFT 20
 #define MEMCG_DELAY_SCALING_SHIFT 14

2307
static u64 calculate_overage(unsigned long usage, unsigned long high)
2308
{
2309
	u64 overage;
2310

2311 2312
	if (usage <= high)
		return 0;
2313

2314 2315 2316 2317 2318
	/*
	 * Prevent division by 0 in overage calculation by acting as if
	 * it was a threshold of 1 page
	 */
	high = max(high, 1UL);
2319

2320 2321 2322 2323
	overage = usage - high;
	overage <<= MEMCG_DELAY_PRECISION_SHIFT;
	return div64_u64(overage, high);
}
2324

2325 2326 2327
static u64 mem_find_max_overage(struct mem_cgroup *memcg)
{
	u64 overage, max_overage = 0;
2328

2329 2330
	do {
		overage = calculate_overage(page_counter_read(&memcg->memory),
2331
					    READ_ONCE(memcg->memory.high));
2332
		max_overage = max(overage, max_overage);
2333 2334 2335
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));

2336 2337 2338
	return max_overage;
}

2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354
static u64 swap_find_max_overage(struct mem_cgroup *memcg)
{
	u64 overage, max_overage = 0;

	do {
		overage = calculate_overage(page_counter_read(&memcg->swap),
					    READ_ONCE(memcg->swap.high));
		if (overage)
			memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
		max_overage = max(overage, max_overage);
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));

	return max_overage;
}

2355 2356 2357 2358 2359 2360 2361 2362 2363 2364
/*
 * Get the number of jiffies that we should penalise a mischievous cgroup which
 * is exceeding its memory.high by checking both it and its ancestors.
 */
static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
					  unsigned int nr_pages,
					  u64 max_overage)
{
	unsigned long penalty_jiffies;

2365 2366
	if (!max_overage)
		return 0;
2367 2368 2369 2370 2371 2372 2373 2374 2375

	/*
	 * We use overage compared to memory.high to calculate the number of
	 * jiffies to sleep (penalty_jiffies). Ideally this value should be
	 * fairly lenient on small overages, and increasingly harsh when the
	 * memcg in question makes it clear that it has no intention of stopping
	 * its crazy behaviour, so we exponentially increase the delay based on
	 * overage amount.
	 */
2376 2377 2378
	penalty_jiffies = max_overage * max_overage * HZ;
	penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
	penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2379 2380 2381 2382 2383 2384 2385 2386 2387

	/*
	 * Factor in the task's own contribution to the overage, such that four
	 * N-sized allocations are throttled approximately the same as one
	 * 4N-sized allocation.
	 *
	 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
	 * larger the current charge patch is than that.
	 */
2388
	return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412
}

/*
 * Scheduled by try_charge() to be executed from the userland return path
 * and reclaims memory over the high limit.
 */
void mem_cgroup_handle_over_high(void)
{
	unsigned long penalty_jiffies;
	unsigned long pflags;
	unsigned int nr_pages = current->memcg_nr_pages_over_high;
	struct mem_cgroup *memcg;

	if (likely(!nr_pages))
		return;

	memcg = get_mem_cgroup_from_mm(current->mm);
	reclaim_high(memcg, nr_pages, GFP_KERNEL);
	current->memcg_nr_pages_over_high = 0;

	/*
	 * memory.high is breached and reclaim is unable to keep up. Throttle
	 * allocators proactively to slow down excessive growth.
	 */
2413 2414
	penalty_jiffies = calculate_high_delay(memcg, nr_pages,
					       mem_find_max_overage(memcg));
2415

2416 2417 2418
	penalty_jiffies += calculate_high_delay(memcg, nr_pages,
						swap_find_max_overage(memcg));

2419 2420 2421 2422 2423 2424 2425
	/*
	 * Clamp the max delay per usermode return so as to still keep the
	 * application moving forwards and also permit diagnostics, albeit
	 * extremely slowly.
	 */
	penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);

2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445
	/*
	 * Don't sleep if the amount of jiffies this memcg owes us is so low
	 * that it's not even worth doing, in an attempt to be nice to those who
	 * go only a small amount over their memory.high value and maybe haven't
	 * been aggressively reclaimed enough yet.
	 */
	if (penalty_jiffies <= HZ / 100)
		goto out;

	/*
	 * If we exit early, we're guaranteed to die (since
	 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
	 * need to account for any ill-begotten jiffies to pay them off later.
	 */
	psi_memstall_enter(&pflags);
	schedule_timeout_killable(penalty_jiffies);
	psi_memstall_leave(&pflags);

out:
	css_put(&memcg->css);
2446 2447
}

2448 2449
static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
		      unsigned int nr_pages)
2450
{
2451
	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2452
	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2453
	struct mem_cgroup *mem_over_limit;
2454
	struct page_counter *counter;
2455
	unsigned long nr_reclaimed;
2456 2457
	bool may_swap = true;
	bool drained = false;
2458
	enum oom_status oom_status;
2459

2460
	if (mem_cgroup_is_root(memcg))
2461
		return 0;
2462
retry:
2463
	if (consume_stock(memcg, nr_pages))
2464
		return 0;
2465

2466
	if (!do_memsw_account() ||
2467 2468
	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
		if (page_counter_try_charge(&memcg->memory, batch, &counter))
2469
			goto done_restock;
2470
		if (do_memsw_account())
2471 2472
			page_counter_uncharge(&memcg->memsw, batch);
		mem_over_limit = mem_cgroup_from_counter(counter, memory);
2473
	} else {
2474
		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2475
		may_swap = false;
2476
	}
2477

2478 2479 2480 2481
	if (batch > nr_pages) {
		batch = nr_pages;
		goto retry;
	}
2482

2483 2484 2485 2486 2487 2488 2489 2490 2491
	/*
	 * Memcg doesn't have a dedicated reserve for atomic
	 * allocations. But like the global atomic pool, we need to
	 * put the burden of reclaim on regular allocation requests
	 * and let these go through as privileged allocations.
	 */
	if (gfp_mask & __GFP_ATOMIC)
		goto force;

2492 2493 2494 2495 2496 2497
	/*
	 * Unlike in global OOM situations, memcg is not in a physical
	 * memory shortage.  Allow dying and OOM-killed tasks to
	 * bypass the last charges so that they can exit quickly and
	 * free their memory.
	 */
2498
	if (unlikely(should_force_charge()))
2499
		goto force;
2500

2501 2502 2503 2504 2505 2506 2507 2508 2509
	/*
	 * Prevent unbounded recursion when reclaim operations need to
	 * allocate memory. This might exceed the limits temporarily,
	 * but we prefer facilitating memory reclaim and getting back
	 * under the limit over triggering OOM kills in these cases.
	 */
	if (unlikely(current->flags & PF_MEMALLOC))
		goto force;

2510 2511 2512
	if (unlikely(task_in_memcg_oom(current)))
		goto nomem;

2513
	if (!gfpflags_allow_blocking(gfp_mask))
2514
		goto nomem;
2515

2516
	memcg_memory_event(mem_over_limit, MEMCG_MAX);
2517

2518 2519
	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
						    gfp_mask, may_swap);
2520

2521
	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2522
		goto retry;
2523

2524
	if (!drained) {
2525
		drain_all_stock(mem_over_limit);
2526 2527 2528 2529
		drained = true;
		goto retry;
	}

2530 2531
	if (gfp_mask & __GFP_NORETRY)
		goto nomem;
2532 2533 2534 2535 2536 2537 2538 2539 2540
	/*
	 * Even though the limit is exceeded at this point, reclaim
	 * may have been able to free some pages.  Retry the charge
	 * before killing the task.
	 *
	 * Only for regular pages, though: huge pages are rather
	 * unlikely to succeed so close to the limit, and we fall back
	 * to regular pages anyway in case of failure.
	 */
2541
	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2542 2543 2544 2545 2546 2547 2548 2549
		goto retry;
	/*
	 * At task move, charge accounts can be doubly counted. So, it's
	 * better to wait until the end of task_move if something is going on.
	 */
	if (mem_cgroup_wait_acct_move(mem_over_limit))
		goto retry;

2550 2551 2552
	if (nr_retries--)
		goto retry;

2553
	if (gfp_mask & __GFP_RETRY_MAYFAIL)
2554 2555
		goto nomem;

2556
	if (gfp_mask & __GFP_NOFAIL)
2557
		goto force;
2558

2559
	if (fatal_signal_pending(current))
2560
		goto force;
2561

2562 2563 2564 2565 2566 2567
	/*
	 * keep retrying as long as the memcg oom killer is able to make
	 * a forward progress or bypass the charge if the oom killer
	 * couldn't make any progress.
	 */
	oom_status = mem_cgroup_oom(mem_over_limit, gfp_mask,
2568
		       get_order(nr_pages * PAGE_SIZE));
2569 2570 2571 2572 2573 2574 2575 2576 2577
	switch (oom_status) {
	case OOM_SUCCESS:
		nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
		goto retry;
	case OOM_FAILED:
		goto force;
	default:
		goto nomem;
	}
2578
nomem:
2579
	if (!(gfp_mask & __GFP_NOFAIL))
2580
		return -ENOMEM;
2581 2582 2583 2584 2585 2586 2587
force:
	/*
	 * The allocation either can't fail or will lead to more memory
	 * being freed very soon.  Allow memory usage go over the limit
	 * temporarily by force charging it.
	 */
	page_counter_charge(&memcg->memory, nr_pages);
2588
	if (do_memsw_account())
2589 2590 2591 2592
		page_counter_charge(&memcg->memsw, nr_pages);
	css_get_many(&memcg->css, nr_pages);

	return 0;
2593 2594

done_restock:
2595
	css_get_many(&memcg->css, batch);
2596 2597
	if (batch > nr_pages)
		refill_stock(memcg, batch - nr_pages);
2598

2599
	/*
2600 2601
	 * If the hierarchy is above the normal consumption range, schedule
	 * reclaim on returning to userland.  We can perform reclaim here
2602
	 * if __GFP_RECLAIM but let's always punt for simplicity and so that
2603 2604 2605 2606
	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
	 * not recorded as it most likely matches current's and won't
	 * change in the meantime.  As high limit is checked again before
	 * reclaim, the cost of mismatch is negligible.
2607 2608
	 */
	do {
2609 2610 2611 2612 2613 2614 2615 2616 2617 2618
		bool mem_high, swap_high;

		mem_high = page_counter_read(&memcg->memory) >
			READ_ONCE(memcg->memory.high);
		swap_high = page_counter_read(&memcg->swap) >
			READ_ONCE(memcg->swap.high);

		/* Don't bother a random interrupted task */
		if (in_interrupt()) {
			if (mem_high) {
2619 2620 2621
				schedule_work(&memcg->high_work);
				break;
			}
2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634
			continue;
		}

		if (mem_high || swap_high) {
			/*
			 * The allocating tasks in this cgroup will need to do
			 * reclaim or be throttled to prevent further growth
			 * of the memory or swap footprints.
			 *
			 * Target some best-effort fairness between the tasks,
			 * and distribute reclaim work and delay penalties
			 * based on how much each task is actually allocating.
			 */
V
Vladimir Davydov 已提交
2635
			current->memcg_nr_pages_over_high += batch;
2636 2637 2638
			set_notify_resume(current);
			break;
		}
2639
	} while ((memcg = parent_mem_cgroup(memcg)));
2640 2641

	return 0;
2642
}
2643

2644
#if defined(CONFIG_MEMCG_KMEM) || defined(CONFIG_MMU)
2645
static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2646
{
2647 2648 2649
	if (mem_cgroup_is_root(memcg))
		return;

2650
	page_counter_uncharge(&memcg->memory, nr_pages);
2651
	if (do_memsw_account())
2652
		page_counter_uncharge(&memcg->memsw, nr_pages);
2653

2654
	css_put_many(&memcg->css, nr_pages);
2655
}
2656
#endif
2657

2658 2659
static void lock_page_lru(struct page *page, int *isolated)
{
2660
	pg_data_t *pgdat = page_pgdat(page);
2661

2662
	spin_lock_irq(&pgdat->lru_lock);
2663 2664 2665
	if (PageLRU(page)) {
		struct lruvec *lruvec;

2666
		lruvec = mem_cgroup_page_lruvec(page, pgdat);
2667 2668 2669 2670 2671 2672 2673 2674 2675
		ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_lru(page));
		*isolated = 1;
	} else
		*isolated = 0;
}

static void unlock_page_lru(struct page *page, int isolated)
{
2676
	pg_data_t *pgdat = page_pgdat(page);
2677 2678 2679 2680

	if (isolated) {
		struct lruvec *lruvec;

2681
		lruvec = mem_cgroup_page_lruvec(page, pgdat);
2682 2683 2684 2685
		VM_BUG_ON_PAGE(PageLRU(page), page);
		SetPageLRU(page);
		add_page_to_lru_list(page, lruvec, page_lru(page));
	}
2686
	spin_unlock_irq(&pgdat->lru_lock);
2687 2688
}

2689
static void commit_charge(struct page *page, struct mem_cgroup *memcg,
2690
			  bool lrucare)
2691
{
2692
	int isolated;
2693

2694
	VM_BUG_ON_PAGE(page->mem_cgroup, page);
2695 2696 2697 2698 2699

	/*
	 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
	 * may already be on some other mem_cgroup's LRU.  Take care of it.
	 */
2700 2701
	if (lrucare)
		lock_page_lru(page, &isolated);
2702

2703 2704
	/*
	 * Nobody should be changing or seriously looking at
2705
	 * page->mem_cgroup at this point:
2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716
	 *
	 * - the page is uncharged
	 *
	 * - the page is off-LRU
	 *
	 * - an anonymous fault has exclusive page access, except for
	 *   a locked page table
	 *
	 * - a page cache insertion, a swapin fault, or a migration
	 *   have the page locked
	 */
2717
	page->mem_cgroup = memcg;
2718

2719 2720
	if (lrucare)
		unlock_page_lru(page, isolated);
2721
}
2722

2723
#ifdef CONFIG_MEMCG_KMEM
2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750
/*
 * Returns a pointer to the memory cgroup to which the kernel object is charged.
 *
 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
 * cgroup_mutex, etc.
 */
struct mem_cgroup *mem_cgroup_from_obj(void *p)
{
	struct page *page;

	if (mem_cgroup_disabled())
		return NULL;

	page = virt_to_head_page(p);

	/*
	 * Slab pages don't have page->mem_cgroup set because corresponding
	 * kmem caches can be reparented during the lifetime. That's why
	 * memcg_from_slab_page() should be used instead.
	 */
	if (PageSlab(page))
		return memcg_from_slab_page(page);

	/* All other pages use page->mem_cgroup */
	return page->mem_cgroup;
}

2751
static int memcg_alloc_cache_id(void)
2752
{
2753 2754 2755
	int id, size;
	int err;

2756
	id = ida_simple_get(&memcg_cache_ida,
2757 2758 2759
			    0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
	if (id < 0)
		return id;
2760

2761
	if (id < memcg_nr_cache_ids)
2762 2763 2764 2765 2766 2767
		return id;

	/*
	 * There's no space for the new id in memcg_caches arrays,
	 * so we have to grow them.
	 */
2768
	down_write(&memcg_cache_ids_sem);
2769 2770

	size = 2 * (id + 1);
2771 2772 2773 2774 2775
	if (size < MEMCG_CACHES_MIN_SIZE)
		size = MEMCG_CACHES_MIN_SIZE;
	else if (size > MEMCG_CACHES_MAX_SIZE)
		size = MEMCG_CACHES_MAX_SIZE;

2776
	err = memcg_update_all_caches(size);
2777 2778
	if (!err)
		err = memcg_update_all_list_lrus(size);
2779 2780 2781 2782 2783
	if (!err)
		memcg_nr_cache_ids = size;

	up_write(&memcg_cache_ids_sem);

2784
	if (err) {
2785
		ida_simple_remove(&memcg_cache_ida, id);
2786 2787 2788 2789 2790 2791 2792
		return err;
	}
	return id;
}

static void memcg_free_cache_id(int id)
{
2793
	ida_simple_remove(&memcg_cache_ida, id);
2794 2795
}

2796
struct memcg_kmem_cache_create_work {
2797 2798 2799 2800 2801
	struct mem_cgroup *memcg;
	struct kmem_cache *cachep;
	struct work_struct work;
};

2802
static void memcg_kmem_cache_create_func(struct work_struct *w)
2803
{
2804 2805
	struct memcg_kmem_cache_create_work *cw =
		container_of(w, struct memcg_kmem_cache_create_work, work);
2806 2807
	struct mem_cgroup *memcg = cw->memcg;
	struct kmem_cache *cachep = cw->cachep;
2808

2809
	memcg_create_kmem_cache(memcg, cachep);
2810

2811
	css_put(&memcg->css);
2812 2813 2814 2815 2816 2817
	kfree(cw);
}

/*
 * Enqueue the creation of a per-memcg kmem_cache.
 */
2818
static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2819
					       struct kmem_cache *cachep)
2820
{
2821
	struct memcg_kmem_cache_create_work *cw;
2822

2823 2824 2825
	if (!css_tryget_online(&memcg->css))
		return;

2826
	cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN);
2827
	if (!cw)
2828
		return;
2829

2830 2831
	cw->memcg = memcg;
	cw->cachep = cachep;
2832
	INIT_WORK(&cw->work, memcg_kmem_cache_create_func);
2833

2834
	queue_work(memcg_kmem_cache_wq, &cw->work);
2835 2836
}

2837 2838
static inline bool memcg_kmem_bypass(void)
{
2839 2840 2841 2842 2843 2844
	if (in_interrupt())
		return true;

	/* Allow remote memcg charging in kthread contexts. */
	if ((!current->mm || (current->flags & PF_KTHREAD)) &&
	     !current->active_memcg)
2845 2846 2847 2848 2849 2850 2851 2852
		return true;
	return false;
}

/**
 * memcg_kmem_get_cache: select the correct per-memcg cache for allocation
 * @cachep: the original global kmem cache
 *
2853 2854 2855
 * Return the kmem_cache we're supposed to use for a slab allocation.
 * We try to use the current memcg's version of the cache.
 *
2856 2857 2858
 * If the cache does not exist yet, if we are the first user of it, we
 * create it asynchronously in a workqueue and let the current allocation
 * go through with the original cache.
2859
 *
2860 2861 2862 2863
 * This function takes a reference to the cache it returns to assure it
 * won't get destroyed while we are working with it. Once the caller is
 * done with it, memcg_kmem_put_cache() must be called to release the
 * reference.
2864
 */
2865
struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
2866 2867
{
	struct mem_cgroup *memcg;
2868
	struct kmem_cache *memcg_cachep;
2869
	struct memcg_cache_array *arr;
2870
	int kmemcg_id;
2871

2872
	VM_BUG_ON(!is_root_cache(cachep));
2873

2874
	if (memcg_kmem_bypass())
V
Vladimir Davydov 已提交
2875 2876
		return cachep;

2877 2878 2879 2880 2881 2882 2883 2884 2885 2886
	rcu_read_lock();

	if (unlikely(current->active_memcg))
		memcg = current->active_memcg;
	else
		memcg = mem_cgroup_from_task(current);

	if (!memcg || memcg == root_mem_cgroup)
		goto out_unlock;

2887
	kmemcg_id = READ_ONCE(memcg->kmemcg_id);
2888
	if (kmemcg_id < 0)
2889
		goto out_unlock;
2890

2891 2892 2893 2894 2895 2896 2897 2898
	arr = rcu_dereference(cachep->memcg_params.memcg_caches);

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match the data dependency
	 * barrier inside READ_ONCE() (see memcg_create_kmem_cache()).
	 */
	memcg_cachep = READ_ONCE(arr->entries[kmemcg_id]);
2899 2900 2901 2902 2903 2904 2905 2906 2907

	/*
	 * If we are in a safe context (can wait, and not in interrupt
	 * context), we could be be predictable and return right away.
	 * This would guarantee that the allocation being performed
	 * already belongs in the new cache.
	 *
	 * However, there are some clashes that can arrive from locking.
	 * For instance, because we acquire the slab_mutex while doing
2908 2909 2910
	 * memcg_create_kmem_cache, this means no further allocation
	 * could happen with the slab_mutex held. So it's better to
	 * defer everything.
2911 2912 2913 2914 2915 2916 2917
	 *
	 * If the memcg is dying or memcg_cache is about to be released,
	 * don't bother creating new kmem_caches. Because memcg_cachep
	 * is ZEROed as the fist step of kmem offlining, we don't need
	 * percpu_ref_tryget_live() here. css_tryget_online() check in
	 * memcg_schedule_kmem_cache_create() will prevent us from
	 * creation of a new kmem_cache.
2918
	 */
2919 2920 2921 2922 2923 2924
	if (unlikely(!memcg_cachep))
		memcg_schedule_kmem_cache_create(memcg, cachep);
	else if (percpu_ref_tryget(&memcg_cachep->memcg_params.refcnt))
		cachep = memcg_cachep;
out_unlock:
	rcu_read_unlock();
2925
	return cachep;
2926 2927
}

2928 2929 2930 2931 2932
/**
 * memcg_kmem_put_cache: drop reference taken by memcg_kmem_get_cache
 * @cachep: the cache returned by memcg_kmem_get_cache
 */
void memcg_kmem_put_cache(struct kmem_cache *cachep)
2933 2934
{
	if (!is_root_cache(cachep))
2935
		percpu_ref_put(&cachep->memcg_params.refcnt);
2936 2937
}

2938
/**
2939
 * __memcg_kmem_charge: charge a number of kernel pages to a memcg
2940
 * @memcg: memory cgroup to charge
2941
 * @gfp: reclaim mode
2942
 * @nr_pages: number of pages to charge
2943 2944 2945
 *
 * Returns 0 on success, an error code on failure.
 */
2946 2947
int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
			unsigned int nr_pages)
2948
{
2949
	struct page_counter *counter;
2950 2951
	int ret;

2952
	ret = try_charge(memcg, gfp, nr_pages);
2953
	if (ret)
2954
		return ret;
2955 2956 2957

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
	    !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
2958 2959 2960 2961 2962 2963 2964 2965 2966 2967

		/*
		 * Enforce __GFP_NOFAIL allocation because callers are not
		 * prepared to see failures and likely do not have any failure
		 * handling code.
		 */
		if (gfp & __GFP_NOFAIL) {
			page_counter_charge(&memcg->kmem, nr_pages);
			return 0;
		}
2968 2969
		cancel_charge(memcg, nr_pages);
		return -ENOMEM;
2970
	}
2971
	return 0;
2972 2973
}

2974 2975 2976 2977 2978 2979 2980 2981 2982 2983 2984 2985 2986 2987 2988
/**
 * __memcg_kmem_uncharge: uncharge a number of kernel pages from a memcg
 * @memcg: memcg to uncharge
 * @nr_pages: number of pages to uncharge
 */
void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		page_counter_uncharge(&memcg->kmem, nr_pages);

	page_counter_uncharge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_uncharge(&memcg->memsw, nr_pages);
}

2989
/**
2990
 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
2991 2992 2993 2994 2995 2996
 * @page: page to charge
 * @gfp: reclaim mode
 * @order: allocation order
 *
 * Returns 0 on success, an error code on failure.
 */
2997
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
2998
{
2999
	struct mem_cgroup *memcg;
3000
	int ret = 0;
3001

3002
	if (memcg_kmem_bypass())
3003 3004
		return 0;

3005
	memcg = get_mem_cgroup_from_current();
3006
	if (!mem_cgroup_is_root(memcg)) {
3007
		ret = __memcg_kmem_charge(memcg, gfp, 1 << order);
3008 3009
		if (!ret) {
			page->mem_cgroup = memcg;
3010
			__SetPageKmemcg(page);
3011
		}
3012
	}
3013
	css_put(&memcg->css);
3014
	return ret;
3015
}
3016

3017
/**
3018
 * __memcg_kmem_uncharge_page: uncharge a kmem page
3019 3020 3021
 * @page: page to uncharge
 * @order: allocation order
 */
3022
void __memcg_kmem_uncharge_page(struct page *page, int order)
3023
{
3024
	struct mem_cgroup *memcg = page->mem_cgroup;
3025
	unsigned int nr_pages = 1 << order;
3026 3027 3028 3029

	if (!memcg)
		return;

3030
	VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
3031
	__memcg_kmem_uncharge(memcg, nr_pages);
3032
	page->mem_cgroup = NULL;
3033 3034 3035 3036 3037

	/* slab pages do not have PageKmemcg flag set */
	if (PageKmemcg(page))
		__ClearPageKmemcg(page);

3038
	css_put_many(&memcg->css, nr_pages);
3039
}
3040
#endif /* CONFIG_MEMCG_KMEM */
3041

3042 3043 3044 3045
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * Because tail pages are not marked as "used", set it. We're under
3046
 * pgdat->lru_lock and migration entries setup in all page mappings.
3047
 */
3048
void mem_cgroup_split_huge_fixup(struct page *head)
3049
{
3050
	int i;
3051

3052 3053
	if (mem_cgroup_disabled())
		return;
3054

3055
	for (i = 1; i < HPAGE_PMD_NR; i++)
3056
		head[i].mem_cgroup = head->mem_cgroup;
3057
}
3058
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
3059

A
Andrew Morton 已提交
3060
#ifdef CONFIG_MEMCG_SWAP
3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071
/**
 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
 * @entry: swap entry to be moved
 * @from:  mem_cgroup which the entry is moved from
 * @to:  mem_cgroup which the entry is moved to
 *
 * It succeeds only when the swap_cgroup's record for this entry is the same
 * as the mem_cgroup's id of @from.
 *
 * Returns 0 on success, -EINVAL on failure.
 *
3072
 * The caller must have charged to @to, IOW, called page_counter_charge() about
3073 3074 3075
 * both res and memsw, and called css_get().
 */
static int mem_cgroup_move_swap_account(swp_entry_t entry,
3076
				struct mem_cgroup *from, struct mem_cgroup *to)
3077 3078 3079
{
	unsigned short old_id, new_id;

L
Li Zefan 已提交
3080 3081
	old_id = mem_cgroup_id(from);
	new_id = mem_cgroup_id(to);
3082 3083

	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3084 3085
		mod_memcg_state(from, MEMCG_SWAP, -1);
		mod_memcg_state(to, MEMCG_SWAP, 1);
3086 3087 3088 3089 3090 3091
		return 0;
	}
	return -EINVAL;
}
#else
static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3092
				struct mem_cgroup *from, struct mem_cgroup *to)
3093 3094 3095
{
	return -EINVAL;
}
3096
#endif
K
KAMEZAWA Hiroyuki 已提交
3097

3098
static DEFINE_MUTEX(memcg_max_mutex);
3099

3100 3101
static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
				 unsigned long max, bool memsw)
3102
{
3103
	bool enlarge = false;
3104
	bool drained = false;
3105
	int ret;
3106 3107
	bool limits_invariant;
	struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
3108

3109
	do {
3110 3111 3112 3113
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
3114

3115
		mutex_lock(&memcg_max_mutex);
3116 3117
		/*
		 * Make sure that the new limit (memsw or memory limit) doesn't
3118
		 * break our basic invariant rule memory.max <= memsw.max.
3119
		 */
3120
		limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
3121
					   max <= memcg->memsw.max;
3122
		if (!limits_invariant) {
3123
			mutex_unlock(&memcg_max_mutex);
3124 3125 3126
			ret = -EINVAL;
			break;
		}
3127
		if (max > counter->max)
3128
			enlarge = true;
3129 3130
		ret = page_counter_set_max(counter, max);
		mutex_unlock(&memcg_max_mutex);
3131 3132 3133 3134

		if (!ret)
			break;

3135 3136 3137 3138 3139 3140
		if (!drained) {
			drain_all_stock(memcg);
			drained = true;
			continue;
		}

3141 3142 3143 3144 3145 3146
		if (!try_to_free_mem_cgroup_pages(memcg, 1,
					GFP_KERNEL, !memsw)) {
			ret = -EBUSY;
			break;
		}
	} while (true);
3147

3148 3149
	if (!ret && enlarge)
		memcg_oom_recover(memcg);
3150

3151 3152 3153
	return ret;
}

3154
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
3155 3156 3157 3158
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	unsigned long nr_reclaimed = 0;
3159
	struct mem_cgroup_per_node *mz, *next_mz = NULL;
3160 3161
	unsigned long reclaimed;
	int loop = 0;
3162
	struct mem_cgroup_tree_per_node *mctz;
3163
	unsigned long excess;
3164 3165 3166 3167 3168
	unsigned long nr_scanned;

	if (order > 0)
		return 0;

3169
	mctz = soft_limit_tree_node(pgdat->node_id);
3170 3171 3172 3173 3174 3175

	/*
	 * Do not even bother to check the largest node if the root
	 * is empty. Do it lockless to prevent lock bouncing. Races
	 * are acceptable as soft limit is best effort anyway.
	 */
3176
	if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
3177 3178
		return 0;

3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192
	/*
	 * This loop can run a while, specially if mem_cgroup's continuously
	 * keep exceeding their soft limit and putting the system under
	 * pressure
	 */
	do {
		if (next_mz)
			mz = next_mz;
		else
			mz = mem_cgroup_largest_soft_limit_node(mctz);
		if (!mz)
			break;

		nr_scanned = 0;
3193
		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
3194 3195 3196
						    gfp_mask, &nr_scanned);
		nr_reclaimed += reclaimed;
		*total_scanned += nr_scanned;
3197
		spin_lock_irq(&mctz->lock);
3198
		__mem_cgroup_remove_exceeded(mz, mctz);
3199 3200 3201 3202 3203 3204

		/*
		 * If we failed to reclaim anything from this memory cgroup
		 * it is time to move on to the next cgroup
		 */
		next_mz = NULL;
3205 3206 3207
		if (!reclaimed)
			next_mz = __mem_cgroup_largest_soft_limit_node(mctz);

3208
		excess = soft_limit_excess(mz->memcg);
3209 3210 3211 3212 3213 3214 3215 3216 3217
		/*
		 * One school of thought says that we should not add
		 * back the node to the tree if reclaim returns 0.
		 * But our reclaim could return 0, simply because due
		 * to priority we are exposing a smaller subset of
		 * memory to reclaim from. Consider this as a longer
		 * term TODO.
		 */
		/* If excess == 0, no tree ops */
3218
		__mem_cgroup_insert_exceeded(mz, mctz, excess);
3219
		spin_unlock_irq(&mctz->lock);
3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236
		css_put(&mz->memcg->css);
		loop++;
		/*
		 * Could not reclaim anything and there are no more
		 * mem cgroups to try or we seem to be looping without
		 * reclaiming anything.
		 */
		if (!nr_reclaimed &&
			(next_mz == NULL ||
			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
			break;
	} while (!nr_reclaimed);
	if (next_mz)
		css_put(&next_mz->memcg->css);
	return nr_reclaimed;
}

3237 3238 3239 3240 3241 3242
/*
 * Test whether @memcg has children, dead or alive.  Note that this
 * function doesn't care whether @memcg has use_hierarchy enabled and
 * returns %true if there are child csses according to the cgroup
 * hierarchy.  Testing use_hierarchy is the caller's responsiblity.
 */
3243 3244
static inline bool memcg_has_children(struct mem_cgroup *memcg)
{
3245 3246 3247 3248 3249 3250
	bool ret;

	rcu_read_lock();
	ret = css_next_child(NULL, &memcg->css);
	rcu_read_unlock();
	return ret;
3251 3252
}

3253
/*
3254
 * Reclaims as many pages from the given memcg as possible.
3255 3256 3257 3258 3259 3260 3261
 *
 * Caller is responsible for holding css reference for memcg.
 */
static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
{
	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;

3262 3263
	/* we call try-to-free pages for make this cgroup empty */
	lru_add_drain_all();
3264 3265 3266

	drain_all_stock(memcg);

3267
	/* try to free all pages in this cgroup */
3268
	while (nr_retries && page_counter_read(&memcg->memory)) {
3269
		int progress;
3270

3271 3272 3273
		if (signal_pending(current))
			return -EINTR;

3274 3275
		progress = try_to_free_mem_cgroup_pages(memcg, 1,
							GFP_KERNEL, true);
3276
		if (!progress) {
3277
			nr_retries--;
3278
			/* maybe some writeback is necessary */
3279
			congestion_wait(BLK_RW_ASYNC, HZ/10);
3280
		}
3281 3282

	}
3283 3284

	return 0;
3285 3286
}

3287 3288 3289
static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
					    char *buf, size_t nbytes,
					    loff_t off)
3290
{
3291
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3292

3293 3294
	if (mem_cgroup_is_root(memcg))
		return -EINVAL;
3295
	return mem_cgroup_force_empty(memcg) ?: nbytes;
3296 3297
}

3298 3299
static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
				     struct cftype *cft)
3300
{
3301
	return mem_cgroup_from_css(css)->use_hierarchy;
3302 3303
}

3304 3305
static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
				      struct cftype *cft, u64 val)
3306 3307
{
	int retval = 0;
3308
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
T
Tejun Heo 已提交
3309
	struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
3310

3311
	if (memcg->use_hierarchy == val)
3312
		return 0;
3313

3314
	/*
3315
	 * If parent's use_hierarchy is set, we can't make any modifications
3316 3317 3318 3319 3320 3321
	 * in the child subtrees. If it is unset, then the change can
	 * occur, provided the current cgroup has no children.
	 *
	 * For the root cgroup, parent_mem is NULL, we allow value to be
	 * set if there are no children.
	 */
3322
	if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
3323
				(val == 1 || val == 0)) {
3324
		if (!memcg_has_children(memcg))
3325
			memcg->use_hierarchy = val;
3326 3327 3328 3329
		else
			retval = -EBUSY;
	} else
		retval = -EINVAL;
3330

3331 3332 3333
	return retval;
}

3334
static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3335
{
3336
	unsigned long val;
3337

3338
	if (mem_cgroup_is_root(memcg)) {
3339
		val = memcg_page_state(memcg, NR_FILE_PAGES) +
3340
			memcg_page_state(memcg, NR_ANON_MAPPED);
3341 3342
		if (swap)
			val += memcg_page_state(memcg, MEMCG_SWAP);
3343
	} else {
3344
		if (!swap)
3345
			val = page_counter_read(&memcg->memory);
3346
		else
3347
			val = page_counter_read(&memcg->memsw);
3348
	}
3349
	return val;
3350 3351
}

3352 3353 3354 3355 3356 3357 3358
enum {
	RES_USAGE,
	RES_LIMIT,
	RES_MAX_USAGE,
	RES_FAILCNT,
	RES_SOFT_LIMIT,
};
3359

3360
static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
3361
			       struct cftype *cft)
B
Balbir Singh 已提交
3362
{
3363
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3364
	struct page_counter *counter;
3365

3366
	switch (MEMFILE_TYPE(cft->private)) {
3367
	case _MEM:
3368 3369
		counter = &memcg->memory;
		break;
3370
	case _MEMSWAP:
3371 3372
		counter = &memcg->memsw;
		break;
3373
	case _KMEM:
3374
		counter = &memcg->kmem;
3375
		break;
V
Vladimir Davydov 已提交
3376
	case _TCP:
3377
		counter = &memcg->tcpmem;
V
Vladimir Davydov 已提交
3378
		break;
3379 3380 3381
	default:
		BUG();
	}
3382 3383 3384 3385

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_USAGE:
		if (counter == &memcg->memory)
3386
			return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
3387
		if (counter == &memcg->memsw)
3388
			return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
3389 3390
		return (u64)page_counter_read(counter) * PAGE_SIZE;
	case RES_LIMIT:
3391
		return (u64)counter->max * PAGE_SIZE;
3392 3393 3394 3395 3396 3397 3398 3399 3400
	case RES_MAX_USAGE:
		return (u64)counter->watermark * PAGE_SIZE;
	case RES_FAILCNT:
		return counter->failcnt;
	case RES_SOFT_LIMIT:
		return (u64)memcg->soft_limit * PAGE_SIZE;
	default:
		BUG();
	}
B
Balbir Singh 已提交
3401
}
3402

3403
static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg)
3404
{
3405
	unsigned long stat[MEMCG_NR_STAT] = {0};
3406 3407 3408 3409
	struct mem_cgroup *mi;
	int node, cpu, i;

	for_each_online_cpu(cpu)
3410
		for (i = 0; i < MEMCG_NR_STAT; i++)
3411
			stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu);
3412 3413

	for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
3414
		for (i = 0; i < MEMCG_NR_STAT; i++)
3415 3416 3417 3418 3419 3420
			atomic_long_add(stat[i], &mi->vmstats[i]);

	for_each_node(node) {
		struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
		struct mem_cgroup_per_node *pi;

3421
		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
3422 3423 3424
			stat[i] = 0;

		for_each_online_cpu(cpu)
3425
			for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
3426 3427
				stat[i] += per_cpu(
					pn->lruvec_stat_cpu->count[i], cpu);
3428 3429

		for (pi = pn; pi; pi = parent_nodeinfo(pi, node))
3430
			for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
3431 3432 3433 3434
				atomic_long_add(stat[i], &pi->lruvec_stat[i]);
	}
}

3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445
static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg)
{
	unsigned long events[NR_VM_EVENT_ITEMS];
	struct mem_cgroup *mi;
	int cpu, i;

	for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
		events[i] = 0;

	for_each_online_cpu(cpu)
		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3446 3447
			events[i] += per_cpu(memcg->vmstats_percpu->events[i],
					     cpu);
3448 3449 3450 3451 3452 3453

	for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			atomic_long_add(events[i], &mi->vmevents[i]);
}

3454
#ifdef CONFIG_MEMCG_KMEM
3455
static int memcg_online_kmem(struct mem_cgroup *memcg)
3456 3457 3458
{
	int memcg_id;

3459 3460 3461
	if (cgroup_memory_nokmem)
		return 0;

3462
	BUG_ON(memcg->kmemcg_id >= 0);
3463
	BUG_ON(memcg->kmem_state);
3464

3465
	memcg_id = memcg_alloc_cache_id();
3466 3467
	if (memcg_id < 0)
		return memcg_id;
3468

3469
	static_branch_inc(&memcg_kmem_enabled_key);
3470
	/*
3471
	 * A memory cgroup is considered kmem-online as soon as it gets
V
Vladimir Davydov 已提交
3472
	 * kmemcg_id. Setting the id after enabling static branching will
3473 3474 3475
	 * guarantee no one starts accounting before all call sites are
	 * patched.
	 */
V
Vladimir Davydov 已提交
3476
	memcg->kmemcg_id = memcg_id;
3477
	memcg->kmem_state = KMEM_ONLINE;
3478
	INIT_LIST_HEAD(&memcg->kmem_caches);
3479 3480

	return 0;
3481 3482
}

3483 3484 3485 3486 3487 3488 3489 3490 3491 3492 3493 3494 3495 3496 3497 3498 3499 3500 3501 3502
static void memcg_offline_kmem(struct mem_cgroup *memcg)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *parent, *child;
	int kmemcg_id;

	if (memcg->kmem_state != KMEM_ONLINE)
		return;
	/*
	 * Clear the online state before clearing memcg_caches array
	 * entries. The slab_mutex in memcg_deactivate_kmem_caches()
	 * guarantees that no cache will be created for this cgroup
	 * after we are done (see memcg_create_kmem_cache()).
	 */
	memcg->kmem_state = KMEM_ALLOCATED;

	parent = parent_mem_cgroup(memcg);
	if (!parent)
		parent = root_mem_cgroup;

3503
	/*
3504
	 * Deactivate and reparent kmem_caches.
3505
	 */
3506 3507 3508 3509 3510
	memcg_deactivate_kmem_caches(memcg, parent);

	kmemcg_id = memcg->kmemcg_id;
	BUG_ON(kmemcg_id < 0);

3511 3512 3513 3514 3515 3516 3517 3518
	/*
	 * Change kmemcg_id of this cgroup and all its descendants to the
	 * parent's id, and then move all entries from this cgroup's list_lrus
	 * to ones of the parent. After we have finished, all list_lrus
	 * corresponding to this cgroup are guaranteed to remain empty. The
	 * ordering is imposed by list_lru_node->lock taken by
	 * memcg_drain_all_list_lrus().
	 */
3519
	rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
3520 3521 3522 3523 3524 3525 3526
	css_for_each_descendant_pre(css, &memcg->css) {
		child = mem_cgroup_from_css(css);
		BUG_ON(child->kmemcg_id != kmemcg_id);
		child->kmemcg_id = parent->kmemcg_id;
		if (!memcg->use_hierarchy)
			break;
	}
3527 3528
	rcu_read_unlock();

3529
	memcg_drain_all_list_lrus(kmemcg_id, parent);
3530 3531 3532 3533 3534 3535

	memcg_free_cache_id(kmemcg_id);
}

static void memcg_free_kmem(struct mem_cgroup *memcg)
{
3536 3537 3538 3539
	/* css_alloc() failed, offlining didn't happen */
	if (unlikely(memcg->kmem_state == KMEM_ONLINE))
		memcg_offline_kmem(memcg);

3540
	if (memcg->kmem_state == KMEM_ALLOCATED) {
3541
		WARN_ON(!list_empty(&memcg->kmem_caches));
3542 3543 3544
		static_branch_dec(&memcg_kmem_enabled_key);
	}
}
3545
#else
3546
static int memcg_online_kmem(struct mem_cgroup *memcg)
3547 3548 3549 3550 3551 3552 3553 3554 3555
{
	return 0;
}
static void memcg_offline_kmem(struct mem_cgroup *memcg)
{
}
static void memcg_free_kmem(struct mem_cgroup *memcg)
{
}
3556
#endif /* CONFIG_MEMCG_KMEM */
3557

3558 3559
static int memcg_update_kmem_max(struct mem_cgroup *memcg,
				 unsigned long max)
3560
{
3561
	int ret;
3562

3563 3564 3565
	mutex_lock(&memcg_max_mutex);
	ret = page_counter_set_max(&memcg->kmem, max);
	mutex_unlock(&memcg_max_mutex);
3566
	return ret;
3567
}

static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
{
	int ret;

	mutex_lock(&memcg_max_mutex);

	ret = page_counter_set_max(&memcg->tcpmem, max);
	if (ret)
		goto out;

	if (!memcg->tcpmem_active) {
		/*
		 * The active flag needs to be written after the static_key
		 * update. This is what guarantees that the socket activation
		 * function is the last one to run. See mem_cgroup_sk_alloc()
		 * for details, and note that we don't mark any socket as
		 * belonging to this memcg until that flag is up.
		 *
		 * We need to do this, because static_keys will span multiple
		 * sites, but we can't control their order. If we mark a socket
		 * as accounted, but the accounting functions are not patched in
		 * yet, we'll lose accounting.
		 *
		 * We never race with the readers in mem_cgroup_sk_alloc(),
		 * because when this value change, the code to process it is not
		 * patched in yet.
		 */
		static_branch_inc(&memcg_sockets_enabled_key);
		memcg->tcpmem_active = true;
	}
out:
	mutex_unlock(&memcg_max_mutex);
	return ret;
}

/*
 * Handler for writes to the limit-type control files (RES_LIMIT and
 * RES_SOFT_LIMIT).
 */
static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long nr_pages;
	int ret;

	buf = strstrip(buf);
	ret = page_counter_memparse(buf, "-1", &nr_pages);
	if (ret)
		return ret;

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_LIMIT:
		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
			ret = -EINVAL;
			break;
		}
		switch (MEMFILE_TYPE(of_cft(of)->private)) {
		case _MEM:
			ret = mem_cgroup_resize_max(memcg, nr_pages, false);
			break;
		case _MEMSWAP:
			ret = mem_cgroup_resize_max(memcg, nr_pages, true);
			break;
		case _KMEM:
			pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. "
				     "Please report your usecase to linux-mm@kvack.org if you "
				     "depend on this functionality.\n");
			ret = memcg_update_kmem_max(memcg, nr_pages);
			break;
		case _TCP:
			ret = memcg_update_tcp_max(memcg, nr_pages);
			break;
		}
		break;
	case RES_SOFT_LIMIT:
		memcg->soft_limit = nr_pages;
		ret = 0;
		break;
	}
	return ret ?: nbytes;
}
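
/*
 * Illustrative cgroup v1 usage of the limit files handled above (the
 * mount point path is the conventional one and may differ):
 *
 *   echo 512M > /sys/fs/cgroup/memory/<group>/memory.limit_in_bytes
 *   echo -1   > /sys/fs/cgroup/memory/<group>/memory.limit_in_bytes
 *
 * Values go through page_counter_memparse(), so K/M/G suffixes are
 * accepted and "-1" means "no limit" (PAGE_COUNTER_MAX).
 */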

static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
				size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	struct page_counter *counter;

	switch (MEMFILE_TYPE(of_cft(of)->private)) {
	case _MEM:
		counter = &memcg->memory;
		break;
	case _MEMSWAP:
		counter = &memcg->memsw;
		break;
	case _KMEM:
		counter = &memcg->kmem;
		break;
	case _TCP:
		counter = &memcg->tcpmem;
		break;
	default:
		BUG();
	}

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_MAX_USAGE:
		page_counter_reset_watermark(counter);
		break;
	case RES_FAILCNT:
		counter->failcnt = 0;
		break;
	default:
		BUG();
	}

	return nbytes;
}

static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
					struct cftype *cft)
{
	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
}

#ifdef CONFIG_MMU
static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
					struct cftype *cft, u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	if (val & ~MOVE_MASK)
		return -EINVAL;

	/*
	 * No kind of locking is needed in here, because ->can_attach() will
	 * check this value once in the beginning of the process, and then carry
	 * on with stale data. This means that changes to this value will only
	 * affect task migrations starting after the change.
	 */
	memcg->move_charge_at_immigrate = val;
	return 0;
}
#else
static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
					struct cftype *cft, u64 val)
{
	return -ENOSYS;
}
#endif

#ifdef CONFIG_NUMA

#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
#define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)

static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
				int nid, unsigned int lru_mask, bool tree)
{
	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
	unsigned long nr = 0;
	enum lru_list lru;

	VM_BUG_ON((unsigned)nid >= nr_node_ids);

	for_each_lru(lru) {
		if (!(BIT(lru) & lru_mask))
			continue;
		if (tree)
			nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
		else
			nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
	}
	return nr;
}

static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
					     unsigned int lru_mask,
					     bool tree)
{
	unsigned long nr = 0;
	enum lru_list lru;

	for_each_lru(lru) {
		if (!(BIT(lru) & lru_mask))
			continue;
		if (tree)
			nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
		else
			nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
	}
	return nr;
}
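
/*
 * memory.numa_stat (produced by memcg_numa_stat_show() below) emits one
 * line per category, e.g.:
 *
 *   total=<pages> N0=<pages> N1=<pages> ...
 *   file=<pages> N0=<pages> ...
 *   anon=<pages> ...
 *   unevictable=<pages> ...
 *   hierarchical_total=<pages> N0=<pages> ...
 *
 * The "hierarchical_" lines include descendant cgroups (tree == true).
 */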

static int memcg_numa_stat_show(struct seq_file *m, void *v)
{
	struct numa_stat {
		const char *name;
		unsigned int lru_mask;
	};

	static const struct numa_stat stats[] = {
		{ "total", LRU_ALL },
		{ "file", LRU_ALL_FILE },
		{ "anon", LRU_ALL_ANON },
		{ "unevictable", BIT(LRU_UNEVICTABLE) },
	};
	const struct numa_stat *stat;
	int nid;
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);

	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
		seq_printf(m, "%s=%lu", stat->name,
			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
						   false));
		for_each_node_state(nid, N_MEMORY)
			seq_printf(m, " N%d=%lu", nid,
				   mem_cgroup_node_nr_lru_pages(memcg, nid,
							stat->lru_mask, false));
		seq_putc(m, '\n');
	}

	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {

		seq_printf(m, "hierarchical_%s=%lu", stat->name,
			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
						   true));
		for_each_node_state(nid, N_MEMORY)
			seq_printf(m, " N%d=%lu", nid,
				   mem_cgroup_node_nr_lru_pages(memcg, nid,
							stat->lru_mask, true));
		seq_putc(m, '\n');
	}

	return 0;
}
#endif /* CONFIG_NUMA */

static const unsigned int memcg1_stats[] = {
	NR_FILE_PAGES,
	NR_ANON_MAPPED,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	NR_ANON_THPS,
#endif
	NR_SHMEM,
	NR_FILE_MAPPED,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	MEMCG_SWAP,
};

static const char *const memcg1_stat_names[] = {
	"cache",
	"rss",
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	"rss_huge",
#endif
	"shmem",
	"mapped_file",
	"dirty",
	"writeback",
	"swap",
};

/* Universal VM events cgroup1 shows, original sort order */
static const unsigned int memcg1_events[] = {
	PGPGIN,
	PGPGOUT,
	PGFAULT,
	PGMAJFAULT,
};
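
/*
 * memcg1_stats[] and memcg1_stat_names[] are parallel arrays and must
 * stay in sync; the pairing is asserted with BUILD_BUG_ON() in
 * memcg_stat_show().
 */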

static int memcg_stat_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
	unsigned long memory, memsw;
	struct mem_cgroup *mi;
	unsigned int i;

	BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));

	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
		unsigned long nr;

		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
			continue;
		nr = memcg_page_state_local(memcg, memcg1_stats[i]);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		if (memcg1_stats[i] == NR_ANON_THPS)
			nr *= HPAGE_PMD_NR;
#endif
		seq_printf(m, "%s %lu\n", memcg1_stat_names[i], nr * PAGE_SIZE);
	}

	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
		seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]),
			   memcg_events_local(memcg, memcg1_events[i]));

	for (i = 0; i < NR_LRU_LISTS; i++)
		seq_printf(m, "%s %lu\n", lru_list_name(i),
			   memcg_page_state_local(memcg, NR_LRU_BASE + i) *
			   PAGE_SIZE);

	/* Hierarchical information */
	memory = memsw = PAGE_COUNTER_MAX;
	for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
		memory = min(memory, READ_ONCE(mi->memory.max));
		memsw = min(memsw, READ_ONCE(mi->memsw.max));
	}
	seq_printf(m, "hierarchical_memory_limit %llu\n",
		   (u64)memory * PAGE_SIZE);
	if (do_memsw_account())
		seq_printf(m, "hierarchical_memsw_limit %llu\n",
			   (u64)memsw * PAGE_SIZE);

	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
			continue;
		seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
			   (u64)memcg_page_state(memcg, memcg1_stats[i]) *
			   PAGE_SIZE);
	}

	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
		seq_printf(m, "total_%s %llu\n",
			   vm_event_name(memcg1_events[i]),
			   (u64)memcg_events(memcg, memcg1_events[i]));

	for (i = 0; i < NR_LRU_LISTS; i++)
		seq_printf(m, "total_%s %llu\n", lru_list_name(i),
			   (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
			   PAGE_SIZE);

#ifdef CONFIG_DEBUG_VM
	{
		pg_data_t *pgdat;
		struct mem_cgroup_per_node *mz;
		struct zone_reclaim_stat *rstat;
		unsigned long recent_rotated[2] = {0, 0};
		unsigned long recent_scanned[2] = {0, 0};

		for_each_online_pgdat(pgdat) {
			mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
			rstat = &mz->lruvec.reclaim_stat;

			recent_rotated[0] += rstat->recent_rotated[0];
			recent_rotated[1] += rstat->recent_rotated[1];
			recent_scanned[0] += rstat->recent_scanned[0];
			recent_scanned[1] += rstat->recent_scanned[1];
		}
		seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
		seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
		seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
		seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
	}
#endif

	return 0;
}

static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	return mem_cgroup_swappiness(memcg);
}

static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	if (val > 100)
		return -EINVAL;

	if (css->parent)
		memcg->swappiness = val;
	else
		vm_swappiness = val;

	return 0;
}
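
/*
 * E.g. writing 0..100 to a non-root group's memory.swappiness sets the
 * per-memcg swappiness used for its reclaim; writing to the root file
 * updates the global vm_swappiness instead (the css->parent check above).
 */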


static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
{
	struct mem_cgroup_threshold_ary *t;
	unsigned long usage;
	int i;

	rcu_read_lock();
	if (!swap)
		t = rcu_dereference(memcg->thresholds.primary);
	else
		t = rcu_dereference(memcg->memsw_thresholds.primary);

	if (!t)
		goto unlock;

	usage = mem_cgroup_usage(memcg, swap);

	/*
	 * current_threshold points to threshold just below or equal to usage.
	 * If it's not true, a threshold was crossed after last
	 * call of __mem_cgroup_threshold().
	 */
	i = t->current_threshold;

	/*
	 * Iterate backward over array of thresholds starting from
	 * current_threshold and check if a threshold is crossed.
	 * If none of thresholds below usage is crossed, we read
	 * only one element of the array here.
	 */
	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
		eventfd_signal(t->entries[i].eventfd, 1);

	/* i = current_threshold + 1 */
	i++;

	/*
	 * Iterate forward over array of thresholds starting from
	 * current_threshold+1 and check if a threshold is crossed.
	 * If none of thresholds above usage is crossed, we read
	 * only one element of the array here.
	 */
	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
		eventfd_signal(t->entries[i].eventfd, 1);

	/* Update current_threshold */
	t->current_threshold = i - 1;
unlock:
	rcu_read_unlock();
}
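
/*
 * Worked example (illustrative): with thresholds {4M, 8M, 16M} and a
 * last-seen usage of 10M, current_threshold points at 8M.  If usage
 * drops to 3M, the backward scan signals 8M and 4M; if it instead rises
 * to 20M, the forward scan signals 16M.
 */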

static void mem_cgroup_threshold(struct mem_cgroup *memcg)
{
	while (memcg) {
		__mem_cgroup_threshold(memcg, false);
		if (do_memsw_account())
			__mem_cgroup_threshold(memcg, true);

		memcg = parent_mem_cgroup(memcg);
	}
}

static int compare_thresholds(const void *a, const void *b)
{
	const struct mem_cgroup_threshold *_a = a;
	const struct mem_cgroup_threshold *_b = b;

	if (_a->threshold > _b->threshold)
		return 1;

	if (_a->threshold < _b->threshold)
		return -1;

	return 0;
}

static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
{
	struct mem_cgroup_eventfd_list *ev;

	spin_lock(&memcg_oom_lock);

	list_for_each_entry(ev, &memcg->oom_notify, list)
		eventfd_signal(ev->eventfd, 1);

	spin_unlock(&memcg_oom_lock);
	return 0;
}

static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg)
		mem_cgroup_oom_notify_cb(iter);
}

static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, const char *args, enum res_type type)
4054
{
4055 4056
	struct mem_cgroup_thresholds *thresholds;
	struct mem_cgroup_threshold_ary *new;
4057 4058
	unsigned long threshold;
	unsigned long usage;
4059
	int i, size, ret;
4060

4061
	ret = page_counter_memparse(args, "-1", &threshold);
4062 4063 4064 4065
	if (ret)
		return ret;

	mutex_lock(&memcg->thresholds_lock);
4066

4067
	if (type == _MEM) {
4068
		thresholds = &memcg->thresholds;
4069
		usage = mem_cgroup_usage(memcg, false);
4070
	} else if (type == _MEMSWAP) {
4071
		thresholds = &memcg->memsw_thresholds;
4072
		usage = mem_cgroup_usage(memcg, true);
4073
	} else
4074 4075 4076
		BUG();

	/* Check if a threshold crossed before adding a new one */
4077
	if (thresholds->primary)
4078 4079
		__mem_cgroup_threshold(memcg, type == _MEMSWAP);

4080
	size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4081 4082

	/* Allocate memory for new array of thresholds */
4083
	new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
4084
	if (!new) {
4085 4086 4087
		ret = -ENOMEM;
		goto unlock;
	}
4088
	new->size = size;
4089 4090

	/* Copy thresholds (if any) to new array */
4091 4092
	if (thresholds->primary) {
		memcpy(new->entries, thresholds->primary->entries, (size - 1) *
4093
				sizeof(struct mem_cgroup_threshold));
4094 4095
	}

4096
	/* Add new threshold */
4097 4098
	new->entries[size - 1].eventfd = eventfd;
	new->entries[size - 1].threshold = threshold;
4099 4100

	/* Sort thresholds. Registering of new threshold isn't time-critical */
4101
	sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
4102 4103 4104
			compare_thresholds, NULL);

	/* Find current threshold */
4105
	new->current_threshold = -1;
4106
	for (i = 0; i < size; i++) {
4107
		if (new->entries[i].threshold <= usage) {
4108
			/*
4109 4110
			 * new->current_threshold will not be used until
			 * rcu_assign_pointer(), so it's safe to increment
4111 4112
			 * it here.
			 */
4113
			++new->current_threshold;
4114 4115
		} else
			break;
4116 4117
	}

4118 4119 4120 4121 4122
	/* Free old spare buffer and save old primary buffer as spare */
	kfree(thresholds->spare);
	thresholds->spare = thresholds->primary;

	rcu_assign_pointer(thresholds->primary, new);
4123

4124
	/* To be sure that nobody uses thresholds */
4125 4126 4127 4128 4129 4130 4131 4132
	synchronize_rcu();

unlock:
	mutex_unlock(&memcg->thresholds_lock);

	return ret;
}

4133
static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, const char *args)
{
4136
	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
}

4139
static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, const char *args)
{
4142
	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
}

4145
static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, enum res_type type)
4147
{
4148 4149
	struct mem_cgroup_thresholds *thresholds;
	struct mem_cgroup_threshold_ary *new;
4150
	unsigned long usage;
4151
	int i, j, size, entries;
4152 4153

	mutex_lock(&memcg->thresholds_lock);
4154 4155

	if (type == _MEM) {
4156
		thresholds = &memcg->thresholds;
4157
		usage = mem_cgroup_usage(memcg, false);
4158
	} else if (type == _MEMSWAP) {
4159
		thresholds = &memcg->memsw_thresholds;
4160
		usage = mem_cgroup_usage(memcg, true);
4161
	} else
4162 4163
		BUG();

4164 4165 4166
	if (!thresholds->primary)
		goto unlock;

4167 4168 4169 4170
	/* Check if a threshold crossed before removing */
	__mem_cgroup_threshold(memcg, type == _MEMSWAP);

	/* Calculate new number of threshold */
4171
	size = entries = 0;
4172 4173
	for (i = 0; i < thresholds->primary->size; i++) {
		if (thresholds->primary->entries[i].eventfd != eventfd)
4174
			size++;
4175 4176
		else
			entries++;
4177 4178
	}

4179
	new = thresholds->spare;
4180

4181 4182 4183 4184
	/* If no items related to eventfd have been cleared, nothing to do */
	if (!entries)
		goto unlock;

4185 4186
	/* Set thresholds array to NULL if we don't have thresholds */
	if (!size) {
4187 4188
		kfree(new);
		new = NULL;
4189
		goto swap_buffers;
4190 4191
	}

4192
	new->size = size;
4193 4194

	/* Copy thresholds and find current threshold */
4195 4196 4197
	new->current_threshold = -1;
	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
		if (thresholds->primary->entries[i].eventfd == eventfd)
4198 4199
			continue;

4200
		new->entries[j] = thresholds->primary->entries[i];
4201
		if (new->entries[j].threshold <= usage) {
4202
			/*
4203
			 * new->current_threshold will not be used
4204 4205 4206
			 * until rcu_assign_pointer(), so it's safe to increment
			 * it here.
			 */
4207
			++new->current_threshold;
4208 4209 4210 4211
		}
		j++;
	}

4212
swap_buffers:
4213 4214
	/* Swap primary and spare array */
	thresholds->spare = thresholds->primary;
4215

4216
	rcu_assign_pointer(thresholds->primary, new);
4217

4218
	/* To be sure that nobody uses thresholds */
4219
	synchronize_rcu();
4220 4221 4222 4223 4224 4225

	/* If all events are unregistered, free the spare array */
	if (!new) {
		kfree(thresholds->spare);
		thresholds->spare = NULL;
	}
4226
unlock:
4227 4228
	mutex_unlock(&memcg->thresholds_lock);
}
4229

4230
static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd)
{
4233
	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
}

4236
static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd)
{
4239
	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
}

4242
static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd, const char *args)
{
	struct mem_cgroup_eventfd_list *event;

	event = kmalloc(sizeof(*event),	GFP_KERNEL);
	if (!event)
		return -ENOMEM;

4251
	spin_lock(&memcg_oom_lock);

	event->eventfd = eventfd;
	list_add(&event->list, &memcg->oom_notify);

	/* already in OOM ? */
4257
	if (memcg->under_oom)
		eventfd_signal(eventfd, 1);
4259
	spin_unlock(&memcg_oom_lock);

	return 0;
}

4264
static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
	struct eventfd_ctx *eventfd)
{
	struct mem_cgroup_eventfd_list *ev, *tmp;

4269
	spin_lock(&memcg_oom_lock);

4271
	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
		if (ev->eventfd == eventfd) {
			list_del(&ev->list);
			kfree(ev);
		}
	}

4278
	spin_unlock(&memcg_oom_lock);
}

4281
static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
4282
{
4283
	struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
4284

4285
	seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
4286
	seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
	seq_printf(sf, "oom_kill %lu\n",
		   atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
4289 4290 4291
	return 0;
}

4292
static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
4293 4294
	struct cftype *cft, u64 val)
{
4295
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4296 4297

	/* cannot set to root cgroup and only 0 and 1 are allowed */
4298
	if (!css->parent || !((val == 0) || (val == 1)))
4299 4300
		return -EINVAL;

4301
	memcg->oom_kill_disable = val;
4302
	if (!val)
4303
		memcg_oom_recover(memcg);
4304

4305 4306 4307
	return 0;
}

4308 4309
#ifdef CONFIG_CGROUP_WRITEBACK

4310 4311
#include <trace/events/writeback.h>

static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
{
	return wb_domain_init(&memcg->cgwb_domain, gfp);
}

static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
{
	wb_domain_exit(&memcg->cgwb_domain);
}

4322 4323 4324 4325 4326
static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
{
	wb_domain_size_changed(&memcg->cgwb_domain);
}

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);

	if (!memcg->css.parent)
		return NULL;

	return &memcg->cgwb_domain;
}

4337 4338 4339 4340 4341 4342
/*
 * idx can be of type enum memcg_stat_item or node_stat_item.
 * Keep in sync with memcg_exact_page().
 */
static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx)
{
4343
	long x = atomic_long_read(&memcg->vmstats[idx]);
4344 4345 4346
	int cpu;

	for_each_online_cpu(cpu)
4347
		x += per_cpu_ptr(memcg->vmstats_percpu, cpu)->stat[idx];
4348 4349 4350 4351 4352
	if (x < 0)
		x = 0;
	return x;
}

4353 4354 4355
/**
 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
 * @wb: bdi_writeback in question
4356 4357
 * @pfilepages: out parameter for number of file pages
 * @pheadroom: out parameter for number of allocatable pages according to memcg
4358 4359 4360
 * @pdirty: out parameter for number of dirty pages
 * @pwriteback: out parameter for number of pages under writeback
 *
4361 4362 4363
 * Determine the numbers of file, headroom, dirty, and writeback pages in
 * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
 * is a bit more involved.
4364
 *
4365 4366 4367 4368 4369
 * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
 * headroom is calculated as the lowest headroom of itself and the
 * ancestors.  Note that this doesn't consider the actual amount of
 * available memory in the system.  The caller should further cap
 * *@pheadroom accordingly.
4370
 */
4371 4372 4373
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback)
4374 4375 4376 4377
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
	struct mem_cgroup *parent;

4378
	*pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY);
4379

4380
	*pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK);
4381 4382
	*pfilepages = memcg_exact_page_state(memcg, NR_INACTIVE_FILE) +
			memcg_exact_page_state(memcg, NR_ACTIVE_FILE);
4383
	*pheadroom = PAGE_COUNTER_MAX;
4384 4385

	while ((parent = parent_mem_cgroup(memcg))) {
4386
		unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
4387
					    READ_ONCE(memcg->memory.high));
4388 4389
		unsigned long used = page_counter_read(&memcg->memory);

4390
		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
4391 4392 4393 4394
		memcg = parent;
	}
}
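
/*
 * Headroom example (illustrative): if this memcg still allows 200MB
 * (min(max, high) - used) but an ancestor only allows 50MB, *pheadroom
 * ends up being 50MB - the lowest headroom along the ancestry.
 */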

/*
 * Foreign dirty flushing
 *
 * There's an inherent mismatch between memcg and writeback.  The former
 * tracks ownership per-page while the latter per-inode.  This was a
 * deliberate design decision because honoring per-page ownership in the
 * writeback path is complicated, may lead to higher CPU and IO overheads
 * and deemed unnecessary given that write-sharing an inode across
 * different cgroups isn't a common use-case.
 *
 * Combined with inode majority-writer ownership switching, this works well
 * enough in most cases but there are some pathological cases.  For
 * example, let's say there are two cgroups A and B which keep writing to
 * different but confined parts of the same inode.  B owns the inode and
 * A's memory is limited far below B's.  A's dirty ratio can rise enough to
 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
 * triggering background writeback.  A will be slowed down without a way to
 * make writeback of the dirty pages happen.
 *
 * Conditions like the above can lead to a cgroup getting repeatedly and
 * severely throttled after making some progress after each
 * dirty_expire_interval while the underlying IO device is almost
 * completely idle.
 *
 * Solving this problem completely requires matching the ownership tracking
 * granularities between memcg and writeback in either direction.  However,
 * the more egregious behaviors can be avoided by simply remembering the
 * most recent foreign dirtying events and initiating remote flushes on
 * them when local writeback isn't enough to keep the memory clean enough.
 *
 * The following two functions implement such mechanism.  When a foreign
 * page - a page whose memcg and writeback ownerships don't match - is
 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
 * bdi_writeback on the page owning memcg.  When balance_dirty_pages()
 * decides that the memcg needs to sleep due to high dirty ratio, it calls
 * mem_cgroup_flush_foreign() which queues writeback on the recorded
 * foreign bdi_writebacks which haven't expired.  Both the numbers of
 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
 * limited to MEMCG_CGWB_FRN_CNT.
 *
 * The mechanism only remembers IDs and doesn't hold any object references.
 * As being wrong occasionally doesn't matter, updates and accesses to the
 * records are lockless and racy.
 */
void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
					     struct bdi_writeback *wb)
{
	struct mem_cgroup *memcg = page->mem_cgroup;
	struct memcg_cgwb_frn *frn;
	u64 now = get_jiffies_64();
	u64 oldest_at = now;
	int oldest = -1;
	int i;

	trace_track_foreign_dirty(page, wb);

	/*
	 * Pick the slot to use.  If there is already a slot for @wb, keep
	 * using it.  If not replace the oldest one which isn't being
	 * written out.
	 */
	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
		frn = &memcg->cgwb_frn[i];
		if (frn->bdi_id == wb->bdi->id &&
		    frn->memcg_id == wb->memcg_css->id)
			break;
		if (time_before64(frn->at, oldest_at) &&
		    atomic_read(&frn->done.cnt) == 1) {
			oldest = i;
			oldest_at = frn->at;
		}
	}

	if (i < MEMCG_CGWB_FRN_CNT) {
		/*
		 * Re-using an existing one.  Update timestamp lazily to
		 * avoid making the cacheline hot.  We want them to be
		 * reasonably up-to-date and significantly shorter than
		 * dirty_expire_interval as that's what expires the record.
		 * Use the shorter of 1s and dirty_expire_interval / 8.
		 */
		unsigned long update_intv =
			min_t(unsigned long, HZ,
			      msecs_to_jiffies(dirty_expire_interval * 10) / 8);

		if (time_before64(frn->at, now - update_intv))
			frn->at = now;
	} else if (oldest >= 0) {
		/* replace the oldest free one */
		frn = &memcg->cgwb_frn[oldest];
		frn->bdi_id = wb->bdi->id;
		frn->memcg_id = wb->memcg_css->id;
		frn->at = now;
	}
}

/* issue foreign writeback flushes for recorded foreign dirtying events */
void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
	unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
	u64 now = jiffies_64;
	int i;

	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
		struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];

		/*
		 * If the record is older than dirty_expire_interval,
		 * writeback on it has already started.  No need to kick it
		 * off again.  Also, don't start a new one if there's
		 * already one in flight.
		 */
		if (time_after64(frn->at, now - intv) &&
		    atomic_read(&frn->done.cnt) == 1) {
			frn->at = 0;
			trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
			cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, 0,
					       WB_REASON_FOREIGN_FLUSH,
					       &frn->done);
		}
	}
}

#else	/* CONFIG_CGROUP_WRITEBACK */

static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
{
	return 0;
}

static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
{
}

4530 4531 4532 4533
static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
{
}

4534 4535
#endif	/* CONFIG_CGROUP_WRITEBACK */

/*
 * DO NOT USE IN NEW FILES.
 *
 * "cgroup.event_control" implementation.
 *
 * This is way over-engineered.  It tries to support fully configurable
 * events for each user.  Such level of flexibility is completely
 * unnecessary especially in the light of the planned unified hierarchy.
 *
 * Please deprecate this and replace with something simpler if at all
 * possible.
 */

/*
 * Unregister event and free resources.
 *
 * Gets called from workqueue.
 */
4554
static void memcg_event_remove(struct work_struct *work)
4555
{
4556 4557
	struct mem_cgroup_event *event =
		container_of(work, struct mem_cgroup_event, remove);
4558
	struct mem_cgroup *memcg = event->memcg;
4559 4560 4561

	remove_wait_queue(event->wqh, &event->wait);

4562
	event->unregister_event(memcg, event->eventfd);
4563 4564 4565 4566 4567 4568

	/* Notify userspace the event is going away. */
	eventfd_signal(event->eventfd, 1);

	eventfd_ctx_put(event->eventfd);
	kfree(event);
4569
	css_put(&memcg->css);
4570 4571 4572
}

/*
4573
 * Gets called on EPOLLHUP on eventfd when user closes it.
4574 4575 4576
 *
 * Called with wqh->lock held and interrupts disabled.
 */
4577
static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
4578
			    int sync, void *key)
4579
{
4580 4581
	struct mem_cgroup_event *event =
		container_of(wait, struct mem_cgroup_event, wait);
4582
	struct mem_cgroup *memcg = event->memcg;
	__poll_t flags = key_to_poll(key);
4584

4585
	if (flags & EPOLLHUP) {
4586 4587 4588 4589 4590 4591 4592 4593 4594
		/*
		 * If the event has been detached at cgroup removal, we
		 * can simply return knowing the other side will cleanup
		 * for us.
		 *
		 * We can't race against event freeing since the other
		 * side will require wqh->lock via remove_wait_queue(),
		 * which we hold.
		 */
4595
		spin_lock(&memcg->event_list_lock);
4596 4597 4598 4599 4600 4601 4602 4603
		if (!list_empty(&event->list)) {
			list_del_init(&event->list);
			/*
			 * We are in atomic context, but cgroup_event_remove()
			 * may sleep, so we have to call it in workqueue.
			 */
			schedule_work(&event->remove);
		}
4604
		spin_unlock(&memcg->event_list_lock);
4605 4606 4607 4608 4609
	}

	return 0;
}

4610
static void memcg_event_ptable_queue_proc(struct file *file,
4611 4612
		wait_queue_head_t *wqh, poll_table *pt)
{
4613 4614
	struct mem_cgroup_event *event =
		container_of(pt, struct mem_cgroup_event, pt);
4615 4616 4617 4618 4619 4620

	event->wqh = wqh;
	add_wait_queue(wqh, &event->wait);
}

/*
 * DO NOT USE IN NEW FILES.
 *
 * Parse input and register new cgroup event handler.
 *
 * Input must be in format '<event_fd> <control_fd> <args>'.
 * Interpretation of args is defined by control file implementation.
 */
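/*
 * Illustrative use of cgroup.event_control for a usage threshold
 * (cgroup v1 only):
 *
 *   efd = eventfd(0, 0);
 *   cfd = open(".../memory.usage_in_bytes", O_RDONLY);
 *   write the string "<efd> <cfd> 50M" to .../cgroup.event_control
 *
 * after which efd is signalled whenever usage crosses 50M (see
 * __mem_cgroup_usage_register_event()).
 */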
static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
					 char *buf, size_t nbytes, loff_t off)
4630
{
4631
	struct cgroup_subsys_state *css = of_css(of);
4632
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4633
	struct mem_cgroup_event *event;
4634 4635 4636 4637
	struct cgroup_subsys_state *cfile_css;
	unsigned int efd, cfd;
	struct fd efile;
	struct fd cfile;
4638
	const char *name;
4639 4640 4641
	char *endp;
	int ret;

4642 4643 4644
	buf = strstrip(buf);

	efd = simple_strtoul(buf, &endp, 10);
4645 4646
	if (*endp != ' ')
		return -EINVAL;
4647
	buf = endp + 1;
4648

4649
	cfd = simple_strtoul(buf, &endp, 10);
4650 4651
	if ((*endp != ' ') && (*endp != '\0'))
		return -EINVAL;
4652
	buf = endp + 1;
4653 4654 4655 4656 4657

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return -ENOMEM;

4658
	event->memcg = memcg;
4659
	INIT_LIST_HEAD(&event->list);
4660 4661 4662
	init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
	init_waitqueue_func_entry(&event->wait, memcg_event_wake);
	INIT_WORK(&event->remove, memcg_event_remove);

	efile = fdget(efd);
	if (!efile.file) {
		ret = -EBADF;
		goto out_kfree;
	}

	event->eventfd = eventfd_ctx_fileget(efile.file);
	if (IS_ERR(event->eventfd)) {
		ret = PTR_ERR(event->eventfd);
		goto out_put_efile;
	}

	cfile = fdget(cfd);
	if (!cfile.file) {
		ret = -EBADF;
		goto out_put_eventfd;
	}

	/* the process need read permission on control file */
	/* AV: shouldn't we check that it's been opened for read instead? */
	ret = inode_permission(file_inode(cfile.file), MAY_READ);
	if (ret < 0)
		goto out_put_cfile;

4688 4689 4690 4691 4692
	/*
	 * Determine the event callbacks and set them in @event.  This used
	 * to be done via struct cftype but cgroup core no longer knows
	 * about these events.  The following is crude but the whole thing
	 * is for compatibility anyway.
4693 4694
	 *
	 * DO NOT ADD NEW FILES.
4695
	 */
	name = cfile.file->f_path.dentry->d_name.name;
4697 4698 4699 4700 4701 4702 4703 4704 4705 4706 4707

	if (!strcmp(name, "memory.usage_in_bytes")) {
		event->register_event = mem_cgroup_usage_register_event;
		event->unregister_event = mem_cgroup_usage_unregister_event;
	} else if (!strcmp(name, "memory.oom_control")) {
		event->register_event = mem_cgroup_oom_register_event;
		event->unregister_event = mem_cgroup_oom_unregister_event;
	} else if (!strcmp(name, "memory.pressure_level")) {
		event->register_event = vmpressure_register_event;
		event->unregister_event = vmpressure_unregister_event;
	} else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
		event->register_event = memsw_cgroup_usage_register_event;
		event->unregister_event = memsw_cgroup_usage_unregister_event;
4710 4711 4712 4713 4714
	} else {
		ret = -EINVAL;
		goto out_put_cfile;
	}

4715
	/*
4716 4717 4718
	 * Verify @cfile should belong to @css.  Also, remaining events are
	 * automatically removed on cgroup destruction but the removal is
	 * asynchronous, so take an extra ref on @css.
4719
	 */
	cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
4721
					       &memory_cgrp_subsys);
4722
	ret = -EINVAL;
4723
	if (IS_ERR(cfile_css))
4724
		goto out_put_cfile;
4725 4726
	if (cfile_css != css) {
		css_put(cfile_css);
4727
		goto out_put_cfile;
4728
	}
4729

4730
	ret = event->register_event(memcg, event->eventfd, buf);
4731 4732 4733
	if (ret)
		goto out_put_css;

4734
	vfs_poll(efile.file, &event->pt);
4735

4736 4737 4738
	spin_lock(&memcg->event_list_lock);
	list_add(&event->list, &memcg->event_list);
	spin_unlock(&memcg->event_list_lock);
4739 4740 4741 4742

	fdput(cfile);
	fdput(efile);

4743
	return nbytes;
4744 4745

out_put_css:
4746
	css_put(css);
out_put_cfile:
	fdput(cfile);
out_put_eventfd:
	eventfd_ctx_put(event->eventfd);
out_put_efile:
	fdput(efile);
out_kfree:
	kfree(event);

	return ret;
}

4759
static struct cftype mem_cgroup_legacy_files[] = {
	{
4761
		.name = "usage_in_bytes",
4762
		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
4763
		.read_u64 = mem_cgroup_read_u64,
	},
4765 4766
	{
		.name = "max_usage_in_bytes",
4767
		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
4768
		.write = mem_cgroup_reset,
4769
		.read_u64 = mem_cgroup_read_u64,
4770
	},
	{
4772
		.name = "limit_in_bytes",
4773
		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
4774
		.write = mem_cgroup_write,
4775
		.read_u64 = mem_cgroup_read_u64,
	},
4777 4778 4779
	{
		.name = "soft_limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
4780
		.write = mem_cgroup_write,
4781
		.read_u64 = mem_cgroup_read_u64,
4782
	},
	{
		.name = "failcnt",
4785
		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
4786
		.write = mem_cgroup_reset,
4787
		.read_u64 = mem_cgroup_read_u64,
	},
4789 4790
	{
		.name = "stat",
4791
		.seq_show = memcg_stat_show,
4792
	},
4793 4794
	{
		.name = "force_empty",
4795
		.write = mem_cgroup_force_empty_write,
4796
	},
4797 4798 4799 4800 4801
	{
		.name = "use_hierarchy",
		.write_u64 = mem_cgroup_hierarchy_write,
		.read_u64 = mem_cgroup_hierarchy_read,
	},
4802
	{
4803
		.name = "cgroup.event_control",		/* XXX: for compat */
4804
		.write = memcg_write_event_control,
4805
		.flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
4806
	},
	{
		.name = "swappiness",
		.read_u64 = mem_cgroup_swappiness_read,
		.write_u64 = mem_cgroup_swappiness_write,
	},
4812 4813 4814 4815 4816
	{
		.name = "move_charge_at_immigrate",
		.read_u64 = mem_cgroup_move_charge_read,
		.write_u64 = mem_cgroup_move_charge_write,
	},
	{
		.name = "oom_control",
4819
		.seq_show = mem_cgroup_oom_control_read,
4820
		.write_u64 = mem_cgroup_oom_control_write,
		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
	},
4823 4824 4825
	{
		.name = "pressure_level",
	},
4826 4827 4828
#ifdef CONFIG_NUMA
	{
		.name = "numa_stat",
4829
		.seq_show = memcg_numa_stat_show,
4830 4831
	},
#endif
4832 4833 4834
	{
		.name = "kmem.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
4835
		.write = mem_cgroup_write,
4836
		.read_u64 = mem_cgroup_read_u64,
4837 4838 4839 4840
	},
	{
		.name = "kmem.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
4841
		.read_u64 = mem_cgroup_read_u64,
4842 4843 4844 4845
	},
	{
		.name = "kmem.failcnt",
		.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
4846
		.write = mem_cgroup_reset,
4847
		.read_u64 = mem_cgroup_read_u64,
4848 4849 4850 4851
	},
	{
		.name = "kmem.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
4852
		.write = mem_cgroup_reset,
4853
		.read_u64 = mem_cgroup_read_u64,
4854
	},
4855 4856
#if defined(CONFIG_MEMCG_KMEM) && \
	(defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
4857 4858
	{
		.name = "kmem.slabinfo",
4859 4860 4861
		.seq_start = memcg_slab_start,
		.seq_next = memcg_slab_next,
		.seq_stop = memcg_slab_stop,
4862
		.seq_show = memcg_slab_show,
4863 4864
	},
#endif
	{
		.name = "kmem.tcp.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "kmem.tcp.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "kmem.tcp.failcnt",
		.private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "kmem.tcp.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
4888
	{ },	/* terminate */
4889
};
4890

/*
 * Private memory cgroup IDR
 *
 * Swap-out records and page cache shadow entries need to store memcg
 * references in constrained space, so we maintain an ID space that is
 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
 * memory-controlled cgroups to 64k.
 *
 * However, there usually are many references to the offline CSS after
 * the cgroup has been destroyed, such as page cache or reclaimable
 * slab objects, that don't need to hang on to the ID. We want to keep
 * those dead CSS from occupying IDs, or we might quickly exhaust the
 * relatively small ID space and prevent the creation of new cgroups
 * even when there are much fewer than 64k cgroups - possibly none.
 *
 * Maintain a private 16-bit ID space for memcg, and allow the ID to
 * be freed and recycled when it's no longer needed, which is usually
 * when the CSS is offlined.
 *
 * The only exception to that are records of swapped out tmpfs/shmem
 * pages that need to be attributed to live ancestors on swapin. But
 * those references are manageable from userspace.
 */

static DEFINE_IDR(mem_cgroup_idr);

static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
{
	if (memcg->id.id > 0) {
		idr_remove(&mem_cgroup_idr, memcg->id.id);
		memcg->id.id = 0;
	}
}

4925 4926
static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
						  unsigned int n)
4927
{
4928
	refcount_add(n, &memcg->id.ref);
4929 4930
}

4931
static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
4932
{
4933
	if (refcount_sub_and_test(n, &memcg->id.ref)) {
4934
		mem_cgroup_id_remove(memcg);
4935 4936 4937 4938 4939 4940

		/* Memcg ID pins CSS */
		css_put(&memcg->css);
	}
}

4941 4942 4943 4944 4945
static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
{
	mem_cgroup_id_put_many(memcg, 1);
}

4946 4947 4948 4949 4950 4951 4952 4953 4954 4955 4956 4957
/**
 * mem_cgroup_from_id - look up a memcg from a memcg id
 * @id: the memcg id to look up
 *
 * Caller must hold rcu_read_lock().
 */
struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return idr_find(&mem_cgroup_idr, id);
}
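
/*
 * Typical lookup pattern (illustrative):
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();
 */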

static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
4959 4960
{
	struct mem_cgroup_per_node *pn;
4961
	int tmp = node;
4962 4963 4964 4965 4966 4967 4968 4969
	/*
	 * This routine is called against possible nodes.
	 * But it's BUG to call kmalloc() against offline node.
	 *
	 * TODO: this routine can waste much memory for nodes which will
	 *       never be onlined. It's better to use memory hotplug callback
	 *       function.
	 */
4970 4971
	if (!node_state(node, N_NORMAL_MEMORY))
		tmp = -1;
4972
	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
4973 4974
	if (!pn)
		return 1;
4975

4976 4977 4978 4979 4980 4981
	pn->lruvec_stat_local = alloc_percpu(struct lruvec_stat);
	if (!pn->lruvec_stat_local) {
		kfree(pn);
		return 1;
	}

4982 4983
	pn->lruvec_stat_cpu = alloc_percpu(struct lruvec_stat);
	if (!pn->lruvec_stat_cpu) {
4984
		free_percpu(pn->lruvec_stat_local);
4985 4986 4987 4988
		kfree(pn);
		return 1;
	}

4989 4990 4991 4992 4993
	lruvec_init(&pn->lruvec);
	pn->usage_in_excess = 0;
	pn->on_tree = false;
	pn->memcg = memcg;

4994
	memcg->nodeinfo[node] = pn;
4995 4996 4997
	return 0;
}

4998
static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
4999
{
5000 5001
	struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];

	if (!pn)
		return;

5005
	free_percpu(pn->lruvec_stat_cpu);
5006
	free_percpu(pn->lruvec_stat_local);
5007
	kfree(pn);
5008 5009
}

5010
static void __mem_cgroup_free(struct mem_cgroup *memcg)
5011
{
5012
	int node;
5013

5014
	for_each_node(node)
5015
		free_mem_cgroup_per_node_info(memcg, node);
5016
	free_percpu(memcg->vmstats_percpu);
5017
	free_percpu(memcg->vmstats_local);
5018
	kfree(memcg);
5019
}
5020

5021 5022 5023
static void mem_cgroup_free(struct mem_cgroup *memcg)
{
	memcg_wb_domain_exit(memcg);
5024 5025 5026 5027
	/*
	 * Flush percpu vmstats and vmevents to guarantee the value correctness
	 * on parent's and all ancestor levels.
	 */
5028
	memcg_flush_percpu_vmstats(memcg);
5029
	memcg_flush_percpu_vmevents(memcg);
5030 5031 5032
	__mem_cgroup_free(memcg);
}

5033
static struct mem_cgroup *mem_cgroup_alloc(void)
{
5035
	struct mem_cgroup *memcg;
5036
	unsigned int size;
5037
	int node;
5038
	int __maybe_unused i;
5039
	long error = -ENOMEM;

5041 5042 5043 5044
	size = sizeof(struct mem_cgroup);
	size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);

	memcg = kzalloc(size, GFP_KERNEL);
5045
	if (!memcg)
5046
		return ERR_PTR(error);
5047

5048 5049 5050
	memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
				 1, MEM_CGROUP_ID_MAX,
				 GFP_KERNEL);
5051 5052
	if (memcg->id.id < 0) {
		error = memcg->id.id;
5053
		goto fail;
5054
	}
5055

5056 5057 5058 5059
	memcg->vmstats_local = alloc_percpu(struct memcg_vmstats_percpu);
	if (!memcg->vmstats_local)
		goto fail;

5060 5061
	memcg->vmstats_percpu = alloc_percpu(struct memcg_vmstats_percpu);
	if (!memcg->vmstats_percpu)
5062
		goto fail;
5063

	for_each_node(node)
5065
		if (alloc_mem_cgroup_per_node_info(memcg, node))
5066
			goto fail;
5067

5068 5069
	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
		goto fail;
5070

5071
	INIT_WORK(&memcg->high_work, high_work_func);
5072 5073 5074
	INIT_LIST_HEAD(&memcg->oom_notify);
	mutex_init(&memcg->thresholds_lock);
	spin_lock_init(&memcg->move_lock);
5075
	vmpressure_init(&memcg->vmpressure);
5076 5077
	INIT_LIST_HEAD(&memcg->event_list);
	spin_lock_init(&memcg->event_list_lock);
5078
	memcg->socket_pressure = jiffies;
5079
#ifdef CONFIG_MEMCG_KMEM
	memcg->kmemcg_id = -1;
#endif
5082 5083
#ifdef CONFIG_CGROUP_WRITEBACK
	INIT_LIST_HEAD(&memcg->cgwb_list);
5084 5085 5086
	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
		memcg->cgwb_frn[i].done =
			__WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
5087 5088 5089 5090 5091
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
	INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
	memcg->deferred_split_queue.split_queue_len = 0;
5092
#endif
5093
	idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
5094 5095
	return memcg;
fail:
5096
	mem_cgroup_id_remove(memcg);
5097
	__mem_cgroup_free(memcg);
5098
	return ERR_PTR(error);
5099 5100
}

5101 5102
static struct cgroup_subsys_state * __ref
mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
5103
{
5104 5105 5106
	struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
	struct mem_cgroup *memcg;
	long error = -ENOMEM;
5107

5108
	memcg = mem_cgroup_alloc();
5109 5110
	if (IS_ERR(memcg))
		return ERR_CAST(memcg);
5111

5112
	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5113
	memcg->soft_limit = PAGE_COUNTER_MAX;
5114
	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5115 5116 5117 5118 5119 5120
	if (parent) {
		memcg->swappiness = mem_cgroup_swappiness(parent);
		memcg->oom_kill_disable = parent->oom_kill_disable;
	}
	if (parent && parent->use_hierarchy) {
		memcg->use_hierarchy = true;
5121
		page_counter_init(&memcg->memory, &parent->memory);
5122
		page_counter_init(&memcg->swap, &parent->swap);
5123 5124
		page_counter_init(&memcg->memsw, &parent->memsw);
		page_counter_init(&memcg->kmem, &parent->kmem);
5125
		page_counter_init(&memcg->tcpmem, &parent->tcpmem);
5126
	} else {
5127
		page_counter_init(&memcg->memory, NULL);
5128
		page_counter_init(&memcg->swap, NULL);
5129 5130
		page_counter_init(&memcg->memsw, NULL);
		page_counter_init(&memcg->kmem, NULL);
5131
		page_counter_init(&memcg->tcpmem, NULL);
		/*
		 * Deeper hierarchy with use_hierarchy == false doesn't make
		 * much sense so let cgroup subsystem know about this
		 * unfortunate state in our controller.
		 */
5137
		if (parent != root_mem_cgroup)
5138
			memory_cgrp_subsys.broken_hierarchy = true;
5139
	}
5140

5141 5142
	/* The following stuff does not apply to the root */
	if (!parent) {
5143 5144 5145
#ifdef CONFIG_MEMCG_KMEM
		INIT_LIST_HEAD(&memcg->kmem_caches);
#endif
5146 5147 5148 5149
		root_mem_cgroup = memcg;
		return &memcg->css;
	}

5150
	error = memcg_online_kmem(memcg);
5151 5152
	if (error)
		goto fail;
5153

5154
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5155
		static_branch_inc(&memcg_sockets_enabled_key);
5156

5157 5158
	return &memcg->css;
fail:
5159
	mem_cgroup_id_remove(memcg);
5160
	mem_cgroup_free(memcg);
5161
	return ERR_PTR(error);
5162 5163
}

5164
static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
5165
{
5166 5167
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

5168 5169 5170 5171 5172 5173 5174 5175 5176 5177
	/*
	 * A memcg must be visible for memcg_expand_shrinker_maps()
	 * by the time the maps are allocated. So, we allocate maps
	 * here, when for_each_mem_cgroup() can't skip it.
	 */
	if (memcg_alloc_shrinker_maps(memcg)) {
		mem_cgroup_id_remove(memcg);
		return -ENOMEM;
	}

5178
	/* Online state pins memcg ID, memcg ID pins CSS */
5179
	refcount_set(&memcg->id.ref, 1);
5180
	css_get(css);
5181
	return 0;
}

5184
static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
5185
{
5186
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5187
	struct mem_cgroup_event *event, *tmp;
5188 5189 5190 5191 5192 5193

	/*
	 * Unregister events and notify userspace.
	 * Notify userspace about cgroup removing only after rmdir of cgroup
	 * directory to avoid race between userspace and kernelspace.
	 */
5194 5195
	spin_lock(&memcg->event_list_lock);
	list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
5196 5197 5198
		list_del_init(&event->list);
		schedule_work(&event->remove);
	}
5199
	spin_unlock(&memcg->event_list_lock);
5200

	page_counter_set_min(&memcg->memory, 0);
5202
	page_counter_set_low(&memcg->memory, 0);
5203

5204
	memcg_offline_kmem(memcg);
5205
	wb_memcg_offline(memcg);
5206

5207 5208
	drain_all_stock(memcg);

5209
	mem_cgroup_id_put(memcg);
5210 5211
}

5212 5213 5214 5215 5216 5217 5218
static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	invalidate_reclaim_iterators(memcg);
}

5219
static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
{
5221
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5222
	int __maybe_unused i;
5223

5224 5225 5226 5227
#ifdef CONFIG_CGROUP_WRITEBACK
	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
		wb_wait_for_completion(&memcg->cgwb_frn[i].done);
#endif
5228
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5229
		static_branch_dec(&memcg_sockets_enabled_key);
5230

5231
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
		static_branch_dec(&memcg_sockets_enabled_key);
5233

5234 5235 5236
	vmpressure_cleanup(&memcg->vmpressure);
	cancel_work_sync(&memcg->high_work);
	mem_cgroup_remove_from_trees(memcg);
5237
	memcg_free_shrinker_maps(memcg);
5238
	memcg_free_kmem(memcg);
5239
	mem_cgroup_free(memcg);
}

/**
 * mem_cgroup_css_reset - reset the states of a mem_cgroup
 * @css: the target css
 *
 * Reset the states of the mem_cgroup associated with @css.  This is
 * invoked when the userland requests disabling on the default hierarchy
 * but the memcg is pinned through dependency.  The memcg should stop
 * applying policies and should revert to the vanilla state as it may be
 * made visible again.
 *
 * The current implementation only resets the essential configurations.
 * This needs to be expanded to cover all the visible parts.
 */
static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

5259 5260 5261 5262 5263
	page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
	page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
	page_counter_set_max(&memcg->memsw, PAGE_COUNTER_MAX);
	page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
	page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
	page_counter_set_min(&memcg->memory, 0);
5265
	page_counter_set_low(&memcg->memory, 0);
5266
	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5267
	memcg->soft_limit = PAGE_COUNTER_MAX;
5268
	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5269
	memcg_wb_domain_size_changed(memcg);
5270 5271
}

5272
#ifdef CONFIG_MMU
5273
/* Handlers for move charge at task migration. */
5274
static int mem_cgroup_do_precharge(unsigned long count)
5275
{
5276
	int ret;
5277

5278 5279
	/* Try a single bulk charge without reclaim first, kswapd may wake */
	ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
5280
	if (!ret) {
5281 5282 5283
		mc.precharge += count;
		return ret;
	}
5284

5285
	/* Try charges one by one with reclaim, but do not retry */
5286
	while (count--) {
5287
		ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
5288 5289
		if (ret)
			return ret;
5290
		mc.precharge++;
5291
		cond_resched();
5292
	}
5293
	return 0;
5294 5295 5296 5297
}

union mc_target {
	struct page	*page;
5298
	swp_entry_t	ent;
5299 5300 5301
};

enum mc_target_type {
5302
	MC_TARGET_NONE = 0,
5303
	MC_TARGET_PAGE,
5304
	MC_TARGET_SWAP,
5305
	MC_TARGET_DEVICE,
5306 5307
};

static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
						unsigned long addr, pte_t ptent)
5310
{
5311
	struct page *page = vm_normal_page(vma, addr, ptent);
5312

	if (!page || !page_mapped(page))
		return NULL;
	if (PageAnon(page)) {
5316
		if (!(mc.flags & MOVE_ANON))
			return NULL;
5318 5319 5320 5321
	} else {
		if (!(mc.flags & MOVE_FILE))
			return NULL;
	}
	if (!get_page_unless_zero(page))
		return NULL;

	return page;
}

#if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
			pte_t ptent, swp_entry_t *entry)
{
	struct page *page = NULL;
	swp_entry_t ent = pte_to_swp_entry(ptent);

	if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
		return NULL;

	/*
	 * Handle MEMORY_DEVICE_PRIVATE, which are ZONE_DEVICE pages belonging to
	 * a device and, because they are not accessible by the CPU, are stored
	 * as special swap entries in the CPU page table.
	 */
	if (is_device_private_entry(ent)) {
		page = device_private_entry_to_page(ent);
		/*
		 * MEMORY_DEVICE_PRIVATE means this is a ZONE_DEVICE page, which
		 * has a refcount of 1 when free (unlike a normal page).
		 */
		if (!page_ref_add_unless(page, 1, 1))
			return NULL;
		return page;
	}

	/*
	 * Because lookup_swap_cache() updates some statistics counter,
	 * we call find_get_page() with swapper_space directly.
	 */
	page = find_get_page(swap_address_space(ent), swp_offset(ent));
	entry->val = ent.val;

	return page;
}
#else
static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
			pte_t ptent, swp_entry_t *entry)
{
	return NULL;
}
#endif

static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	struct page *page = NULL;
	struct address_space *mapping;
	pgoff_t pgoff;

	if (!vma->vm_file) /* anonymous vma */
		return NULL;
	if (!(mc.flags & MOVE_FILE))
		return NULL;

	mapping = vma->vm_file->f_mapping;
	pgoff = linear_page_index(vma, addr);

	/* page is moved even if it's not RSS of this task(page-faulted). */
#ifdef CONFIG_SWAP
	/* shmem/tmpfs may report page out on swap: account for that too. */
	if (shmem_mapping(mapping)) {
		page = find_get_entry(mapping, pgoff);
		if (xa_is_value(page)) {
			swp_entry_t swp = radix_to_swp_entry(page);
			*entry = swp;
			page = find_get_page(swap_address_space(swp),
					     swp_offset(swp));
		}
	} else
		page = find_get_page(mapping, pgoff);
#else
	page = find_get_page(mapping, pgoff);
#endif
	return page;
}

/**
 * mem_cgroup_move_account - move account of the page
 * @page: the page
 * @compound: charge the page as compound or small page
 * @from: mem_cgroup which the page is moved from.
 * @to:	mem_cgroup which the page is moved to. @from != @to.
 *
 * The caller must make sure the page is not on LRU (isolate_page() is useful.)
 *
 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
 * from old cgroup.
 */
static int mem_cgroup_move_account(struct page *page,
				   bool compound,
				   struct mem_cgroup *from,
				   struct mem_cgroup *to)
{
	struct lruvec *from_vec, *to_vec;
	struct pglist_data *pgdat;
	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
	int ret;

	VM_BUG_ON(from == to);
	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON(compound && !PageTransHuge(page));

	/*
	 * Prevent mem_cgroup_migrate() from looking at
	 * page->mem_cgroup of its source page while we change it.
	 */
	ret = -EBUSY;
	if (!trylock_page(page))
		goto out;

	ret = -EINVAL;
	if (page->mem_cgroup != from)
		goto out_unlock;

	pgdat = page_pgdat(page);
	from_vec = mem_cgroup_lruvec(from, pgdat);
	to_vec = mem_cgroup_lruvec(to, pgdat);

	lock_page_memcg(page);

	if (PageAnon(page)) {
		if (page_mapped(page)) {
			__mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
			__mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
			if (PageTransHuge(page)) {
				__mod_lruvec_state(from_vec, NR_ANON_THPS,
						   -nr_pages);
				__mod_lruvec_state(to_vec, NR_ANON_THPS,
						   nr_pages);
			}

		}
	} else {
		__mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
		__mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);

		if (PageSwapBacked(page)) {
			__mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
			__mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
		}

		if (page_mapped(page)) {
			__mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
			__mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
		}

		if (PageDirty(page)) {
			struct address_space *mapping = page_mapping(page);

			if (mapping_cap_account_dirty(mapping)) {
				__mod_lruvec_state(from_vec, NR_FILE_DIRTY,
						   -nr_pages);
				__mod_lruvec_state(to_vec, NR_FILE_DIRTY,
						   nr_pages);
			}
		}
	}

	if (PageWriteback(page)) {
		__mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
		__mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
	}

	/*
	 * All state has been migrated, let's switch to the new memcg.
	 *
	 * It is safe to change page->mem_cgroup here because the page
	 * is referenced, charged, isolated, and locked: we can't race
	 * with (un)charging, migration, LRU putback, or anything else
	 * that would rely on a stable page->mem_cgroup.
	 *
	 * Note that lock_page_memcg is a memcg lock, not a page lock,
	 * to save space. As soon as we switch page->mem_cgroup to a
	 * new memcg that isn't locked, the above state can change
	 * concurrently again. Make sure we're truly done with it.
	 */
	smp_mb();

	page->mem_cgroup = to; 	/* caller should have done css_get */

	__unlock_page_memcg(from);

	ret = 0;

	local_irq_disable();
	mem_cgroup_charge_statistics(to, page, nr_pages);
	memcg_check_events(to, page);
	mem_cgroup_charge_statistics(from, page, -nr_pages);
	memcg_check_events(from, page);
	local_irq_enable();
out_unlock:
	unlock_page(page);
out:
	return ret;
}

/**
 * get_mctgt_type - get target type of moving charge
 * @vma: the vma the pte to be checked belongs
 * @addr: the address corresponding to the pte to be checked
 * @ptent: the pte to be checked
 * @target: the pointer where the target page or swap entry will be stored (can be NULL)
 *
 * Returns
 *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
 *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
 *     move charge. If @target is not NULL, the page is stored in target->page
 *     with an extra refcount taken (callers should handle it).
 *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
 *     target for charge migration. If @target is not NULL, the entry is stored
 *     in target->ent.
 *   3(MC_TARGET_DEVICE): like MC_TARGET_PAGE  but page is MEMORY_DEVICE_PRIVATE
 *     (so ZONE_DEVICE page and thus not on the lru).
 *     For now such a page is charged like a regular page would be, as for all
 *     intents and purposes it is just special memory taking the place of a
 *     regular page.
 *
 *     See Documentation/vm/hmm.txt and include/linux/hmm.h
 *
 * Called with pte lock held.
 */

static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
		unsigned long addr, pte_t ptent, union mc_target *target)
{
	struct page *page = NULL;
	enum mc_target_type ret = MC_TARGET_NONE;
	swp_entry_t ent = { .val = 0 };

	if (pte_present(ptent))
		page = mc_handle_present_pte(vma, addr, ptent);
	else if (is_swap_pte(ptent))
		page = mc_handle_swap_pte(vma, ptent, &ent);
	else if (pte_none(ptent))
		page = mc_handle_file_pte(vma, addr, ptent, &ent);

	if (!page && !ent.val)
		return ret;
	if (page) {
		/*
		 * Do only loose check w/o serialization.
		 * mem_cgroup_move_account() checks the page is valid or
		 * not under LRU exclusion.
		 */
		if (page->mem_cgroup == mc.from) {
			ret = MC_TARGET_PAGE;
			if (is_device_private_page(page))
				ret = MC_TARGET_DEVICE;
			if (target)
				target->page = page;
		}
		if (!ret || !target)
			put_page(page);
	}
	/*
	 * There is a swap entry and a page doesn't exist or isn't charged.
	 * But we cannot move a tail-page in a THP.
	 */
	if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
	    mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
		ret = MC_TARGET_SWAP;
		if (target)
			target->ent = ent;
	}
	return ret;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * We don't consider PMD mapped swapping or file mapped pages because THP does
 * not support them for now.
 * Caller should make sure that pmd_trans_huge(pmd) is true.
 */
static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
		unsigned long addr, pmd_t pmd, union mc_target *target)
{
	struct page *page = NULL;
	enum mc_target_type ret = MC_TARGET_NONE;

	if (unlikely(is_swap_pmd(pmd))) {
		VM_BUG_ON(thp_migration_supported() &&
				  !is_pmd_migration_entry(pmd));
		return ret;
	}
	page = pmd_page(pmd);
	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
	if (!(mc.flags & MOVE_ANON))
		return ret;
	if (page->mem_cgroup == mc.from) {
		ret = MC_TARGET_PAGE;
		if (target) {
			get_page(page);
			target->page = page;
		}
	}
	return ret;
}
#else
static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
		unsigned long addr, pmd_t pmd, union mc_target *target)
{
	return MC_TARGET_NONE;
}
#endif

static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
					unsigned long addr, unsigned long end,
					struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		/*
		 * Note there can not be MC_TARGET_DEVICE for now as we do not
		 * support transparent huge pages with MEMORY_DEVICE_PRIVATE but
		 * this might change.
		 */
		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
			mc.precharge += HPAGE_PMD_NR;
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		if (get_mctgt_type(vma, addr, *pte, NULL))
			mc.precharge++;	/* increment precharge temporarily */
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	return 0;
}

static const struct mm_walk_ops precharge_walk_ops = {
	.pmd_entry	= mem_cgroup_count_precharge_pte_range,
};

static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
{
	unsigned long precharge;

	down_read(&mm->mmap_sem);
	walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL);
	up_read(&mm->mmap_sem);

	precharge = mc.precharge;
	mc.precharge = 0;

	return precharge;
}

static int mem_cgroup_precharge_mc(struct mm_struct *mm)
{
5687 5688 5689 5690 5691
	unsigned long precharge = mem_cgroup_count_precharge(mm);

	VM_BUG_ON(mc.moving_task);
	mc.moving_task = current;
	return mem_cgroup_do_precharge(precharge);
5692 5693
}

5694 5695
/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
static void __mem_cgroup_clear_mc(void)
5696
{
5697 5698 5699
	struct mem_cgroup *from = mc.from;
	struct mem_cgroup *to = mc.to;

5700
	/* we must uncharge all the leftover precharges from mc.to */
5701
	if (mc.precharge) {
5702
		cancel_charge(mc.to, mc.precharge);
5703 5704 5705 5706 5707 5708 5709
		mc.precharge = 0;
	}
	/*
	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
	 * we must uncharge here.
	 */
	if (mc.moved_charge) {
5710
		cancel_charge(mc.from, mc.moved_charge);
5711
		mc.moved_charge = 0;
5712
	}
5713 5714 5715
	/* we must fixup refcnts and charges */
	if (mc.moved_swap) {
		/* uncharge swap account from the old cgroup */
5716
		if (!mem_cgroup_is_root(mc.from))
5717
			page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
5718

5719 5720
		mem_cgroup_id_put_many(mc.from, mc.moved_swap);

5721
		/*
5722 5723
		 * we charged both to->memory and to->memsw, so we
		 * should uncharge to->memory.
5724
		 */
5725
		if (!mem_cgroup_is_root(mc.to))
5726 5727
			page_counter_uncharge(&mc.to->memory, mc.moved_swap);

5728 5729
		mem_cgroup_id_get_many(mc.to, mc.moved_swap);
		css_put_many(&mc.to->css, mc.moved_swap);
5730

5731 5732
		mc.moved_swap = 0;
	}
5733 5734 5735 5736 5737 5738 5739
	memcg_oom_recover(from);
	memcg_oom_recover(to);
	wake_up_all(&mc.waitq);
}

static void mem_cgroup_clear_mc(void)
{
5740 5741
	struct mm_struct *mm = mc.mm;

5742 5743 5744 5745 5746 5747
	/*
	 * we must clear moving_task before waking up waiters at the end of
	 * task migration.
	 */
	mc.moving_task = NULL;
	__mem_cgroup_clear_mc();
5748
	spin_lock(&mc.lock);
5749 5750
	mc.from = NULL;
	mc.to = NULL;
5751
	mc.mm = NULL;
5752
	spin_unlock(&mc.lock);
5753 5754

	mmput(mm);
5755 5756
}

5757
static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5758
{
5759
	struct cgroup_subsys_state *css;
5760
	struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
5761
	struct mem_cgroup *from;
5762
	struct task_struct *leader, *p;
5763
	struct mm_struct *mm;
5764
	unsigned long move_flags;
5765
	int ret = 0;
5766

5767 5768
	/* charge immigration isn't supported on the default hierarchy */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5769 5770
		return 0;

5771 5772 5773 5774 5775 5776 5777
	/*
	 * Multi-process migrations only happen on the default hierarchy
	 * where charge immigration is not used.  Perform charge
	 * immigration if @tset contains a leader and whine if there are
	 * multiple.
	 */
	p = NULL;
5778
	cgroup_taskset_for_each_leader(leader, css, tset) {
5779 5780
		WARN_ON_ONCE(p);
		p = leader;
5781
		memcg = mem_cgroup_from_css(css);
5782 5783 5784 5785
	}
	if (!p)
		return 0;

5786 5787 5788 5789 5790 5791 5792 5793 5794
	/*
	 * We are now committed to this value whatever it is. Changes in this
	 * tunable will only affect upcoming migrations, not the current one.
	 * So we need to save it, and keep it going.
	 */
	move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
	if (!move_flags)
		return 0;

5795 5796 5797 5798 5799 5800 5801 5802 5803 5804 5805 5806 5807 5808 5809 5810
	from = mem_cgroup_from_task(p);

	VM_BUG_ON(from == memcg);

	mm = get_task_mm(p);
	if (!mm)
		return 0;
	/* We move charges only when we move an owner of the mm */
	if (mm->owner == p) {
		VM_BUG_ON(mc.from);
		VM_BUG_ON(mc.to);
		VM_BUG_ON(mc.precharge);
		VM_BUG_ON(mc.moved_charge);
		VM_BUG_ON(mc.moved_swap);

		spin_lock(&mc.lock);
5811
		mc.mm = mm;
5812 5813 5814 5815 5816 5817 5818 5819 5820
		mc.from = from;
		mc.to = memcg;
		mc.flags = move_flags;
		spin_unlock(&mc.lock);
		/* We set mc.moving_task later */

		ret = mem_cgroup_precharge_mc(mm);
		if (ret)
			mem_cgroup_clear_mc();
5821 5822
	} else {
		mmput(mm);
5823 5824 5825 5826
	}
	return ret;
}

5827
static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
5828
{
5829 5830
	if (mc.to)
		mem_cgroup_clear_mc();
5831 5832
}

5833 5834 5835
static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mm_walk *walk)
5836
{
5837
	int ret = 0;
5838
	struct vm_area_struct *vma = walk->vma;
5839 5840
	pte_t *pte;
	spinlock_t *ptl;
5841 5842 5843
	enum mc_target_type target_type;
	union mc_target target;
	struct page *page;
5844

5845 5846
	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
5847
		if (mc.precharge < HPAGE_PMD_NR) {
5848
			spin_unlock(ptl);
5849 5850 5851 5852 5853 5854
			return 0;
		}
		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
		if (target_type == MC_TARGET_PAGE) {
			page = target.page;
			if (!isolate_lru_page(page)) {
5855
				if (!mem_cgroup_move_account(page, true,
5856
							     mc.from, mc.to)) {
5857 5858 5859 5860 5861 5862
					mc.precharge -= HPAGE_PMD_NR;
					mc.moved_charge += HPAGE_PMD_NR;
				}
				putback_lru_page(page);
			}
			put_page(page);
5863 5864 5865 5866 5867 5868 5869 5870
		} else if (target_type == MC_TARGET_DEVICE) {
			page = target.page;
			if (!mem_cgroup_move_account(page, true,
						     mc.from, mc.to)) {
				mc.precharge -= HPAGE_PMD_NR;
				mc.moved_charge += HPAGE_PMD_NR;
			}
			put_page(page);
5871
		}
5872
		spin_unlock(ptl);
5873
		return 0;
5874 5875
	}

5876 5877
	if (pmd_trans_unstable(pmd))
		return 0;
5878 5879 5880 5881
retry:
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; addr += PAGE_SIZE) {
		pte_t ptent = *(pte++);
5882
		bool device = false;
5883
		swp_entry_t ent;
5884 5885 5886 5887

		if (!mc.precharge)
			break;

5888
		switch (get_mctgt_type(vma, addr, ptent, &target)) {
5889 5890
		case MC_TARGET_DEVICE:
			device = true;
			fallthrough;
5892 5893
		case MC_TARGET_PAGE:
			page = target.page;
5894 5895 5896 5897 5898 5899 5900 5901
			/*
			 * We can have a part of the split pmd here. Moving it
			 * can be done but it would be too convoluted so simply
			 * ignore such a partial THP and keep it in original
			 * memcg. There should be somebody mapping the head.
			 */
			if (PageTransCompound(page))
				goto put;
5902
			if (!device && isolate_lru_page(page))
5903
				goto put;
5904 5905
			if (!mem_cgroup_move_account(page, false,
						mc.from, mc.to)) {
5906
				mc.precharge--;
5907 5908
				/* we uncharge from mc.from later. */
				mc.moved_charge++;
5909
			}
5910 5911
			if (!device)
				putback_lru_page(page);
5912
put:			/* get_mctgt_type() gets the page */
5913 5914
			put_page(page);
			break;
5915 5916
		case MC_TARGET_SWAP:
			ent = target.ent;
5917
			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
5918
				mc.precharge--;
5919 5920 5921
				/* we fixup refcnts and charges later. */
				mc.moved_swap++;
			}
5922
			break;
5923 5924 5925 5926 5927 5928 5929 5930 5931 5932 5933 5934 5935 5936
		default:
			break;
		}
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	if (addr != end) {
		/*
		 * We have consumed all precharges we got in can_attach().
		 * We try charge one by one, but don't do any additional
		 * charges to mc.to if we have failed in charge once in attach()
		 * phase.
		 */
5937
		ret = mem_cgroup_do_precharge(1);
5938 5939 5940 5941 5942 5943 5944
		if (!ret)
			goto retry;
	}

	return ret;
}

5945 5946 5947 5948
static const struct mm_walk_ops charge_walk_ops = {
	.pmd_entry	= mem_cgroup_move_charge_pte_range,
};

5949
static void mem_cgroup_move_charge(void)
5950 5951
{
	lru_add_drain_all();
5952
	/*
5953 5954 5955
	 * Signal lock_page_memcg() to take the memcg's move_lock
	 * while we're moving its pages to another memcg. Then wait
	 * for already started RCU-only updates to finish.
5956 5957 5958
	 */
	atomic_inc(&mc.from->moving_account);
	synchronize_rcu();
5959
retry:
5960
	if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
5961 5962 5963 5964 5965 5966 5967 5968 5969 5970 5971
		/*
		 * Someone who is holding the mmap_sem might be waiting in
		 * waitq. So we cancel all extra charges, wake up all waiters,
		 * and retry. Because we cancel precharges, we might not be able
		 * to move enough charges, but moving charge is a best-effort
		 * feature anyway, so it wouldn't be a big problem.
		 */
		__mem_cgroup_clear_mc();
		cond_resched();
		goto retry;
	}
5972 5973 5974 5975
	/*
	 * When we have consumed all precharges and failed in doing
	 * additional charge, the page walk just aborts.
	 */
5976 5977
	walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops,
			NULL);
5978

5979
	up_read(&mc.mm->mmap_sem);
5980
	atomic_dec(&mc.from->moving_account);
5981 5982
}

5983
static void mem_cgroup_move_task(void)
{
5985 5986
	if (mc.to) {
		mem_cgroup_move_charge();
5987
		mem_cgroup_clear_mc();
5988
	}
}
5990
#else	/* !CONFIG_MMU */
5991
static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5992 5993 5994
{
	return 0;
}
5995
static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
5996 5997
{
}
5998
static void mem_cgroup_move_task(void)
5999 6000 6001
{
}
#endif

6003 6004
/*
 * Cgroup retains root cgroups across [un]mount cycles making it necessary
6005 6006
 * to verify whether we're attached to the default hierarchy on each mount
 * attempt.
6007
 */
6008
static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
6009 6010
{
	/*
6011
	 * use_hierarchy is forced on the default hierarchy.  cgroup core
6012 6013 6014
	 * guarantees that @root doesn't have any children, so turning it
	 * on for the root memcg is enough.
	 */
6015
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
6016 6017 6018
		root_mem_cgroup->use_hierarchy = true;
	else
		root_mem_cgroup->use_hierarchy = false;
6019 6020
}

6021 6022 6023 6024 6025 6026 6027 6028 6029 6030
static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
{
	if (value == PAGE_COUNTER_MAX)
		seq_puts(m, "max\n");
	else
		seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);

	return 0;
}

6031 6032 6033
static u64 memory_current_read(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
6034 6035 6036
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
6037 6038
}

static int memory_min_show(struct seq_file *m, void *v)
{
6041 6042
	return seq_puts_memcg_tunable(m,
		READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
}

static ssize_t memory_min_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long min;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &min);
	if (err)
		return err;

	page_counter_set_min(&memcg->memory, min);

	return nbytes;
}

6062 6063
static int memory_low_show(struct seq_file *m, void *v)
{
6064 6065
	return seq_puts_memcg_tunable(m,
		READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
6066 6067 6068 6069 6070 6071 6072 6073 6074 6075
}

static ssize_t memory_low_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long low;
	int err;

	buf = strstrip(buf);
6076
	err = page_counter_memparse(buf, "max", &low);
6077 6078 6079
	if (err)
		return err;

6080
	page_counter_set_low(&memcg->memory, low);
6081 6082 6083 6084 6085 6086

	return nbytes;
}

static int memory_high_show(struct seq_file *m, void *v)
{
6087 6088
	return seq_puts_memcg_tunable(m,
		READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
6089 6090 6091 6092 6093 6094
}

static ssize_t memory_high_write(struct kernfs_open_file *of,
				 char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6095 6096
	unsigned int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	bool drained = false;
6097 6098 6099 6100
	unsigned long high;
	int err;

	buf = strstrip(buf);
6101
	err = page_counter_memparse(buf, "max", &high);
6102 6103 6104
	if (err)
		return err;

6105
	page_counter_set_high(&memcg->memory, high);
6106

6107 6108 6109 6110 6111 6112 6113 6114 6115 6116 6117 6118 6119 6120 6121 6122 6123 6124 6125 6126 6127 6128
	for (;;) {
		unsigned long nr_pages = page_counter_read(&memcg->memory);
		unsigned long reclaimed;

		if (nr_pages <= high)
			break;

		if (signal_pending(current))
			break;

		if (!drained) {
			drain_all_stock(memcg);
			drained = true;
			continue;
		}

		reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
							 GFP_KERNEL, true);

		if (!reclaimed && !nr_retries--)
			break;
	}
6129

6130 6131 6132 6133 6134
	return nbytes;
}

static int memory_max_show(struct seq_file *m, void *v)
{
6135 6136
	return seq_puts_memcg_tunable(m,
		READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
6137 6138 6139 6140 6141 6142
}

static ssize_t memory_max_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6143 6144
	unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES;
	bool drained = false;
6145 6146 6147 6148
	unsigned long max;
	int err;

	buf = strstrip(buf);
6149
	err = page_counter_memparse(buf, "max", &max);
6150 6151 6152
	if (err)
		return err;

6153
	xchg(&memcg->memory.max, max);
6154 6155 6156 6157 6158 6159 6160

	for (;;) {
		unsigned long nr_pages = page_counter_read(&memcg->memory);

		if (nr_pages <= max)
			break;

6161
		if (signal_pending(current))
6162 6163 6164 6165 6166 6167 6168 6169 6170 6171 6172 6173 6174 6175 6176
			break;

		if (!drained) {
			drain_all_stock(memcg);
			drained = true;
			continue;
		}

		if (nr_reclaims) {
			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
							  GFP_KERNEL, true))
				nr_reclaims--;
			continue;
		}

6177
		memcg_memory_event(memcg, MEMCG_OOM);
6178 6179 6180
		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
			break;
	}
6181

6182
	memcg_wb_domain_size_changed(memcg);
6183 6184 6185
	return nbytes;
}

6186 6187 6188 6189 6190 6191 6192 6193 6194 6195
static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
{
	seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
	seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
	seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
	seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
	seq_printf(m, "oom_kill %lu\n",
		   atomic_long_read(&events[MEMCG_OOM_KILL]));
}

6196 6197
static int memory_events_show(struct seq_file *m, void *v)
{
6198
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6199

6200 6201 6202 6203 6204 6205 6206
	__memory_events_show(m, memcg->memory_events);
	return 0;
}

static int memory_events_local_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6207

6208
	__memory_events_show(m, memcg->memory_events_local);
6209 6210 6211
	return 0;
}

6212 6213
static int memory_stat_show(struct seq_file *m, void *v)
{
6214
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6215
	char *buf;
6216

6217 6218 6219 6220 6221
	buf = memory_stat_format(memcg);
	if (!buf)
		return -ENOMEM;
	seq_puts(m, buf);
	kfree(buf);
6222 6223 6224
	return 0;
}

6225 6226
static int memory_oom_group_show(struct seq_file *m, void *v)
{
6227
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6228 6229 6230 6231 6232 6233 6234 6235 6236 6237 6238 6239 6240 6241 6242 6243 6244 6245 6246 6247 6248 6249 6250 6251 6252 6253 6254 6255

	seq_printf(m, "%d\n", memcg->oom_group);

	return 0;
}

static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
				      char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	int ret, oom_group;

	buf = strstrip(buf);
	if (!buf)
		return -EINVAL;

	ret = kstrtoint(buf, 0, &oom_group);
	if (ret)
		return ret;

	if (oom_group != 0 && oom_group != 1)
		return -EINVAL;

	memcg->oom_group = oom_group;

	return nbytes;
}

6256 6257 6258
static struct cftype memory_files[] = {
	{
		.name = "current",
6259
		.flags = CFTYPE_NOT_ON_ROOT,
6260 6261
		.read_u64 = memory_current_read,
	},
	{
		.name = "min",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_min_show,
		.write = memory_min_write,
	},
6268 6269 6270 6271 6272 6273 6274 6275 6276 6277 6278 6279 6280 6281 6282 6283 6284 6285 6286 6287 6288
	{
		.name = "low",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_low_show,
		.write = memory_low_write,
	},
	{
		.name = "high",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_high_show,
		.write = memory_high_write,
	},
	{
		.name = "max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_max_show,
		.write = memory_max_write,
	},
	{
		.name = "events",
		.flags = CFTYPE_NOT_ON_ROOT,
6289
		.file_offset = offsetof(struct mem_cgroup, events_file),
6290 6291
		.seq_show = memory_events_show,
	},
6292 6293 6294 6295 6296 6297
	{
		.name = "events.local",
		.flags = CFTYPE_NOT_ON_ROOT,
		.file_offset = offsetof(struct mem_cgroup, events_local_file),
		.seq_show = memory_events_local_show,
	},
6298 6299 6300 6301
	{
		.name = "stat",
		.seq_show = memory_stat_show,
	},
6302 6303 6304 6305 6306 6307
	{
		.name = "oom.group",
		.flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
		.seq_show = memory_oom_group_show,
		.write = memory_oom_group_write,
	},
6308 6309 6310
	{ }	/* terminate */
};

6311
struct cgroup_subsys memory_cgrp_subsys = {
6312
	.css_alloc = mem_cgroup_css_alloc,
6313
	.css_online = mem_cgroup_css_online,
6314
	.css_offline = mem_cgroup_css_offline,
6315
	.css_released = mem_cgroup_css_released,
6316
	.css_free = mem_cgroup_css_free,
6317
	.css_reset = mem_cgroup_css_reset,
6318 6319
	.can_attach = mem_cgroup_can_attach,
	.cancel_attach = mem_cgroup_cancel_attach,
6320
	.post_attach = mem_cgroup_move_task,
6321
	.bind = mem_cgroup_bind,
6322 6323
	.dfl_cftypes = memory_files,
	.legacy_cftypes = mem_cgroup_legacy_files,
6324
	.early_init = 0,
};
6326

/*
 * This function calculates an individual cgroup's effective
 * protection which is derived from its own memory.min/low, its
 * parent's and siblings' settings, as well as the actual memory
 * distribution in the tree.
 *
 * The following rules apply to the effective protection values:
 *
 * 1. At the first level of reclaim, effective protection is equal to
 *    the declared protection in memory.min and memory.low.
 *
 * 2. To enable safe delegation of the protection configuration, at
 *    subsequent levels the effective protection is capped to the
 *    parent's effective protection.
 *
 * 3. To make complex and dynamic subtrees easier to configure, the
 *    user is allowed to overcommit the declared protection at a given
 *    level. If that is the case, the parent's effective protection is
 *    distributed to the children in proportion to how much protection
 *    they have declared and how much of it they are utilizing.
 *
 *    This makes distribution proportional, but also work-conserving:
 *    if one cgroup claims much more protection than it uses memory,
 *    the unused remainder is available to its siblings.
 *
 * 4. Conversely, when the declared protection is undercommitted at a
 *    given level, the distribution of the larger parental protection
 *    budget is NOT proportional. A cgroup's protection from a sibling
 *    is capped to its own memory.min/low setting.
 *
 * 5. However, to allow protecting recursive subtrees from each other
 *    without having to declare each individual cgroup's fixed share
 *    of the ancestor's claim to protection, any unutilized -
 *    "floating" - protection from up the tree is distributed in
 *    proportion to each cgroup's *usage*. This makes the protection
 *    neutral wrt sibling cgroups and lets them compete freely over
 *    the shared parental protection budget, but it protects the
 *    subtree as a whole from neighboring subtrees.
 *
 * Note that 4. and 5. are not in conflict: 4. is about protecting
 * against immediate siblings whereas 5. is about protecting against
 * neighboring subtrees.
 */
static unsigned long effective_protection(unsigned long usage,
					  unsigned long parent_usage,
					  unsigned long setting,
					  unsigned long parent_effective,
					  unsigned long siblings_protected)
{
	unsigned long protected;
	unsigned long ep;

	protected = min(usage, setting);
	/*
	 * If all cgroups at this level combined claim and use more
	 * protection then what the parent affords them, distribute
	 * shares in proportion to utilization.
	 *
	 * We are using actual utilization rather than the statically
	 * claimed protection in order to be work-conserving: claimed
	 * but unused protection is available to siblings that would
	 * otherwise get a smaller chunk than what they claimed.
	 */
	if (siblings_protected > parent_effective)
		return protected * parent_effective / siblings_protected;

	/*
	 * Ok, utilized protection of all children is within what the
	 * parent affords them, so we know whatever this child claims
	 * and utilizes is effectively protected.
	 *
	 * If there is unprotected usage beyond this value, reclaim
	 * will apply pressure in proportion to that amount.
	 *
	 * If there is unutilized protection, the cgroup will be fully
	 * shielded from reclaim, but we do return a smaller value for
	 * protection than what the group could enjoy in theory. This
	 * is okay. With the overcommit distribution above, effective
	 * protection is always dependent on how memory is actually
	 * consumed among the siblings anyway.
	 */
	ep = protected;

	/*
	 * If the children aren't claiming (all of) the protection
	 * afforded to them by the parent, distribute the remainder in
	 * proportion to the (unprotected) memory of each cgroup. That
	 * way, cgroups that aren't explicitly prioritized wrt each
	 * other compete freely over the allowance, but they are
	 * collectively protected from neighboring trees.
	 *
	 * We're using unprotected memory for the weight so that if
	 * some cgroups DO claim explicit protection, we don't protect
	 * the same bytes twice.
	 */
	if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
		return ep;

	if (parent_effective > siblings_protected && usage > protected) {
		unsigned long unclaimed;

		unclaimed = parent_effective - siblings_protected;
		unclaimed *= usage - protected;
		unclaimed /= parent_usage - siblings_protected;

		ep += unclaimed;
	}

	return ep;
}
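/*
 * Illustrative example (an addition for clarity, not from the original
 * sources): suppose a parent's effective low protection is 8G and its
 * two children declare memory.low=6G each while using 4G and 8G. Their
 * protected usages are min(4G, 6G) = 4G and min(8G, 6G) = 6G, 10G
 * combined, which overcommits the parent's 8G. Rule 3 above then scales
 * each child by 8G/10G, giving effective protections of 3.2G and 4.8G.
 */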

/**
 * mem_cgroup_protected - check if memory consumption is in the normal range
6440
 * @root: the top ancestor of the sub-tree being checked
6441 6442
 * @memcg: the memory cgroup to check
 *
6443 6444
 * WARNING: This function is not stateless! It can only be used as part
 *          of a top-down tree iteration, not for isolated queries.
6445
 *
 * Returns one of the following:
 *   MEMCG_PROT_NONE: cgroup memory is not protected
 *   MEMCG_PROT_LOW: cgroup memory is protected as long there is
 *     an unprotected supply of reclaimable memory from other cgroups.
 *   MEMCG_PROT_MIN: cgroup memory is protected
6451
 */
enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
						struct mem_cgroup *memcg)
6454
{
6455
	unsigned long usage, parent_usage;
6456 6457
	struct mem_cgroup *parent;

	if (mem_cgroup_disabled())
		return MEMCG_PROT_NONE;
6460

6461 6462 6463
	if (!root)
		root = root_mem_cgroup;
	if (memcg == root)
		return MEMCG_PROT_NONE;
6465

6466
	usage = page_counter_read(&memcg->memory);
	if (!usage)
		return MEMCG_PROT_NONE;

	parent = parent_mem_cgroup(memcg);
6471 6472 6473 6474
	/* No parent means a non-hierarchical mode on v1 memcg */
	if (!parent)
		return MEMCG_PROT_NONE;

6475
	if (parent == root) {
6476
		memcg->memory.emin = READ_ONCE(memcg->memory.min);
6477 6478
		memcg->memory.elow = memcg->memory.low;
		goto out;
	}

6481 6482
	parent_usage = page_counter_read(&parent->memory);

6483
	WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage,
6484 6485
			READ_ONCE(memcg->memory.min),
			READ_ONCE(parent->memory.emin),
6486
			atomic_long_read(&parent->memory.children_min_usage)));
6487

6488
	WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
6489
			memcg->memory.low, READ_ONCE(parent->memory.elow),
6490
			atomic_long_read(&parent->memory.children_low_usage)));
6491

6492 6493
out:
	if (usage <= memcg->memory.emin)
		return MEMCG_PROT_MIN;
6495
	else if (usage <= memcg->memory.elow)
		return MEMCG_PROT_LOW;
	else
		return MEMCG_PROT_NONE;
}
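/*
 * Illustrative caller pattern (a sketch, not part of the original file):
 * a top-down reclaim walk typically does something like
 *
 *	switch (mem_cgroup_protected(root, memcg)) {
 *	case MEMCG_PROT_MIN:
 *		continue;
 *	case MEMCG_PROT_LOW:
 *		if (!low_reclaim_allowed)
 *			continue;
 *		break;
 *	case MEMCG_PROT_NONE:
 *		break;
 *	}
 *
 * where low_reclaim_allowed stands in for whatever state the caller
 * keeps to decide when protected-low memory may be reclaimed anyway.
 */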

/**
6502
 * mem_cgroup_charge - charge a newly allocated page to a cgroup
6503 6504 6505
 * @page: page to charge
 * @mm: mm context of the victim
 * @gfp_mask: reclaim mode
6506
 * @lrucare: page might be on the LRU already
6507 6508 6509 6510
 *
 * Try to charge @page to the memcg that @mm belongs to, reclaiming
 * pages according to @gfp_mask if necessary.
 *
6511
 * Returns 0 on success. Otherwise, an error code is returned.
6512
 */
6513 6514
int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask,
		      bool lrucare)
6515
{
6516
	unsigned int nr_pages = hpage_nr_pages(page);
6517 6518 6519
	struct mem_cgroup *memcg = NULL;
	int ret = 0;

6520 6521
	VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);

6522 6523 6524 6525
	if (mem_cgroup_disabled())
		goto out;

	if (PageSwapCache(page)) {
6526 6527 6528
		swp_entry_t ent = { .val = page_private(page), };
		unsigned short id;

6529 6530 6531
		/*
		 * Every swap fault against a single page tries to charge the
		 * page, bail as early as possible.  shmem_unuse() encounters
6532 6533
		 * already charged pages, too.  page->mem_cgroup is protected
		 * by the page lock, which serializes swap cache removal, which
6534 6535
		 * in turn serializes uncharging.
		 */
6536
		VM_BUG_ON_PAGE(!PageLocked(page), page);
6537
		if (compound_head(page)->mem_cgroup)
6538
			goto out;
6539

6540 6541 6542 6543 6544 6545
		id = lookup_swap_cgroup_id(ent);
		rcu_read_lock();
		memcg = mem_cgroup_from_id(id);
		if (memcg && !css_tryget_online(&memcg->css))
			memcg = NULL;
		rcu_read_unlock();
6546 6547 6548 6549 6550 6551
	}

	if (!memcg)
		memcg = get_mem_cgroup_from_mm(mm);

	ret = try_charge(memcg, gfp_mask, nr_pages);
6552 6553
	if (ret)
		goto out_put;
6554

6555 6556 6557
	commit_charge(page, memcg, lrucare);

	local_irq_disable();
6558
	mem_cgroup_charge_statistics(memcg, page, nr_pages);
6559 6560
	memcg_check_events(memcg, page);
	local_irq_enable();
6561

6562
	if (PageSwapCache(page)) {
6563 6564 6565 6566 6567 6568
		swp_entry_t entry = { .val = page_private(page) };
		/*
		 * The swap entry might not get freed for a long time,
		 * let's not wait for it.  The page already received a
		 * memory+swap charge, drop the swap entry duplicate.
		 */
6569
		mem_cgroup_uncharge_swap(entry, nr_pages);
6570 6571
	}

6572 6573 6574 6575
out_put:
	css_put(&memcg->css);
out:
	return ret;
}
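/*
 * Illustrative call site (a sketch, not from this file): a fault path
 * that has just allocated @page for an mm would typically do
 *
 *	if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL, false))
 *		goto oom;
 *
 * passing lrucare=false because the freshly allocated page is not on
 * the LRU yet.
 */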

struct uncharge_gather {
	struct mem_cgroup *memcg;
6580
	unsigned long nr_pages;
6581 6582 6583 6584 6585 6586
	unsigned long pgpgout;
	unsigned long nr_kmem;
	struct page *dummy_page;
};

static inline void uncharge_gather_clear(struct uncharge_gather *ug)
6587
{
6588 6589 6590 6591 6592
	memset(ug, 0, sizeof(*ug));
}

static void uncharge_batch(const struct uncharge_gather *ug)
{
6593 6594
	unsigned long flags;

6595
	if (!mem_cgroup_is_root(ug->memcg)) {
6596
		page_counter_uncharge(&ug->memcg->memory, ug->nr_pages);
6597
		if (do_memsw_account())
6598
			page_counter_uncharge(&ug->memcg->memsw, ug->nr_pages);
6599 6600 6601
		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem)
			page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem);
		memcg_oom_recover(ug->memcg);
6602
	}
6603 6604

	local_irq_save(flags);
6605
	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
6606
	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages);
6607
	memcg_check_events(ug->memcg, ug->dummy_page);
6608
	local_irq_restore(flags);
6609

6610
	if (!mem_cgroup_is_root(ug->memcg))
6611
		css_put_many(&ug->memcg->css, ug->nr_pages);
6612 6613 6614 6615
}

static void uncharge_page(struct page *page, struct uncharge_gather *ug)
{
6616 6617
	unsigned long nr_pages;

6618 6619 6620 6621 6622 6623 6624 6625 6626 6627 6628 6629 6630 6631 6632 6633 6634 6635 6636
	VM_BUG_ON_PAGE(PageLRU(page), page);

	if (!page->mem_cgroup)
		return;

	/*
	 * Nobody should be changing or seriously looking at
	 * page->mem_cgroup at this point, we have fully
	 * exclusive access to the page.
	 */

	if (ug->memcg != page->mem_cgroup) {
		if (ug->memcg) {
			uncharge_batch(ug);
			uncharge_gather_clear(ug);
		}
		ug->memcg = page->mem_cgroup;
	}

6637 6638
	nr_pages = compound_nr(page);
	ug->nr_pages += nr_pages;
6639

6640
	if (!PageKmemcg(page)) {
6641 6642
		ug->pgpgout++;
	} else {
6643
		ug->nr_kmem += nr_pages;
6644 6645 6646 6647 6648
		__ClearPageKmemcg(page);
	}

	ug->dummy_page = page;
	page->mem_cgroup = NULL;
6649 6650 6651 6652
}

static void uncharge_list(struct list_head *page_list)
{
6653
	struct uncharge_gather ug;
6654
	struct list_head *next;
6655 6656

	uncharge_gather_clear(&ug);
6657

6658 6659 6660 6661
	/*
	 * Note that the list can be a single page->lru; hence the
	 * do-while loop instead of a simple list_for_each_entry().
	 */
6662 6663
	next = page_list->next;
	do {
6664 6665
		struct page *page;

6666 6667 6668
		page = list_entry(next, struct page, lru);
		next = page->lru.next;

6669
		uncharge_page(page, &ug);
6670 6671
	} while (next != page_list);

6672 6673
	if (ug.memcg)
		uncharge_batch(&ug);
6674 6675
}

6676 6677 6678 6679
/**
 * mem_cgroup_uncharge - uncharge a page
 * @page: page to uncharge
 *
6680
 * Uncharge a page previously charged with mem_cgroup_charge().
6681 6682 6683
 */
void mem_cgroup_uncharge(struct page *page)
{
6684 6685
	struct uncharge_gather ug;

6686 6687 6688
	if (mem_cgroup_disabled())
		return;

6689
	/* Don't touch page->lru of any random page, pre-check: */
6690
	if (!page->mem_cgroup)
6691 6692
		return;

6693 6694 6695
	uncharge_gather_clear(&ug);
	uncharge_page(page, &ug);
	uncharge_batch(&ug);
6696
}
6697

6698 6699 6700 6701 6702
/**
 * mem_cgroup_uncharge_list - uncharge a list of page
 * @page_list: list of pages to uncharge
 *
 * Uncharge a list of pages previously charged with
6703
 * mem_cgroup_charge().
6704 6705 6706 6707 6708
 */
void mem_cgroup_uncharge_list(struct list_head *page_list)
{
	if (mem_cgroup_disabled())
		return;
6709

6710 6711
	if (!list_empty(page_list))
		uncharge_list(page_list);
6712 6713 6714
}

/**
6715 6716 6717
 * mem_cgroup_migrate - charge a page's replacement
 * @oldpage: currently circulating page
 * @newpage: replacement page
6718
 *
6719 6720
 * Charge @newpage as a replacement page for @oldpage. @oldpage will
 * be uncharged upon free.
6721 6722 6723
 *
 * Both pages must be locked, @newpage->mapping must be set up.
 */
6724
void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
6725
{
6726
	struct mem_cgroup *memcg;
6727
	unsigned int nr_pages;
6728
	unsigned long flags;
6729 6730 6731 6732

	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
	VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
6733 6734
	VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
		       newpage);
6735 6736 6737 6738 6739

	if (mem_cgroup_disabled())
		return;

	/* Page cache replacement: new page already charged? */
6740
	if (newpage->mem_cgroup)
6741 6742
		return;

6743
	/* Swapcache readahead pages can get replaced before being charged */
6744
	memcg = oldpage->mem_cgroup;
6745
	if (!memcg)
6746 6747
		return;

6748
	/* Force-charge the new page. The old one will be freed soon */
6749
	nr_pages = hpage_nr_pages(newpage);
6750 6751 6752 6753 6754

	page_counter_charge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_charge(&memcg->memsw, nr_pages);
	css_get_many(&memcg->css, nr_pages);
6755

6756
	commit_charge(newpage, memcg, false);
6757

6758
	local_irq_save(flags);
6759
	mem_cgroup_charge_statistics(memcg, newpage, nr_pages);
6760
	memcg_check_events(memcg, newpage);
6761
	local_irq_restore(flags);
6762 6763
}

6764
DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
6765 6766
EXPORT_SYMBOL(memcg_sockets_enabled_key);

6767
void mem_cgroup_sk_alloc(struct sock *sk)
6768 6769 6770
{
	struct mem_cgroup *memcg;

6771 6772 6773
	if (!mem_cgroup_sockets_enabled)
		return;

6774 6775 6776 6777
	/* Do not associate the sock with unrelated interrupted task's memcg. */
	if (in_interrupt())
		return;

6778 6779
	rcu_read_lock();
	memcg = mem_cgroup_from_task(current);
6780 6781
	if (memcg == root_mem_cgroup)
		goto out;
6782
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
		goto out;
	if (css_tryget(&memcg->css))
6785
		sk->sk_memcg = memcg;
6786
out:
6787 6788 6789
	rcu_read_unlock();
}

6790
void mem_cgroup_sk_free(struct sock *sk)
6791
{
6792 6793
	if (sk->sk_memcg)
		css_put(&sk->sk_memcg->css);
6794 6795 6796 6797 6798 6799 6800 6801 6802 6803 6804 6805
}

/**
 * mem_cgroup_charge_skmem - charge socket memory
 * @memcg: memcg to charge
 * @nr_pages: number of pages to charge
 *
 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
 * @memcg's configured limit, %false if the charge had to be forced.
 */
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
{
6806
	gfp_t gfp_mask = GFP_KERNEL;
6807

6808
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
6809
		struct page_counter *fail;
6810

6811 6812
		if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
			memcg->tcpmem_pressure = 0;
6813 6814
			return true;
		}
6815 6816
		page_counter_charge(&memcg->tcpmem, nr_pages);
		memcg->tcpmem_pressure = 1;
6817
		return false;
6818
	}
6819

6820 6821 6822 6823
	/* Don't block in the packet receive path */
	if (in_softirq())
		gfp_mask = GFP_NOWAIT;

6824
	mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
6825

6826 6827 6828 6829
	if (try_charge(memcg, gfp_mask, nr_pages) == 0)
		return true;

	try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
6830 6831 6832 6833 6834
	return false;
}

/**
 * mem_cgroup_uncharge_skmem - uncharge socket memory
 * @memcg: memcg to uncharge
 * @nr_pages: number of pages to uncharge
6837 6838 6839
 */
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
{
6840
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
6841
		page_counter_uncharge(&memcg->tcpmem, nr_pages);
6842 6843
		return;
	}
6844

6845
	mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
6846

6847
	refill_stock(memcg, nr_pages);
6848 6849
}

6850 6851 6852 6853 6854 6855 6856 6857 6858
static int __init cgroup_memory(char *s)
{
	char *token;

	while ((token = strsep(&s, ",")) != NULL) {
		if (!*token)
			continue;
		if (!strcmp(token, "nosocket"))
			cgroup_memory_nosocket = true;
		if (!strcmp(token, "nokmem"))
			cgroup_memory_nokmem = true;
	}
	return 0;
}
__setup("cgroup.memory=", cgroup_memory);
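/*
 * Example (kernel command line, for illustration): booting with
 * "cgroup.memory=nosocket,nokmem" sets both flags parsed above,
 * disabling socket memory accounting and kernel memory accounting.
 */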

6866
/*
6867 6868
 * subsys_initcall() for memory controller.
 *
6869 6870 6871 6872
 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
 * basically everything that doesn't depend on a specific mem_cgroup structure
 * should be initialized from here.
6873 6874 6875
 */
static int __init mem_cgroup_init(void)
{
6876 6877
	int cpu, node;

6878
#ifdef CONFIG_MEMCG_KMEM
6879 6880
	/*
	 * Kmem cache creation is mostly done with the slab_mutex held,
6881 6882 6883
	 * so use a workqueue with limited concurrency to avoid stalling
	 * all worker threads in case lots of cgroups are created and
	 * destroyed simultaneously.
6884
	 */
6885 6886
	memcg_kmem_cache_wq = alloc_workqueue("memcg_kmem_cache", 0, 1);
	BUG_ON(!memcg_kmem_cache_wq);
6887 6888
#endif

6889 6890
	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
				  memcg_hotplug_cpu_dead);
6891 6892 6893 6894 6895 6896 6897 6898 6899 6900 6901

	for_each_possible_cpu(cpu)
		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
			  drain_local_stock);

	for_each_node(node) {
		struct mem_cgroup_tree_per_node *rtpn;

		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
				    node_online(node) ? node : NUMA_NO_NODE);

6902
		rtpn->rb_root = RB_ROOT;
6903
		rtpn->rb_rightmost = NULL;
6904
		spin_lock_init(&rtpn->lock);
6905 6906 6907
		soft_limit_tree.rb_tree_per_node[node] = rtpn;
	}

6908 6909 6910
	return 0;
}
subsys_initcall(mem_cgroup_init);
6911 6912

#ifdef CONFIG_MEMCG_SWAP
6913 6914
static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
{
6915
	while (!refcount_inc_not_zero(&memcg->id.ref)) {
6916 6917 6918 6919 6920 6921 6922 6923 6924 6925 6926 6927 6928 6929 6930
		/*
		 * The root cgroup cannot be destroyed, so it's refcount must
		 * always be >= 1.
		 */
		if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
			VM_BUG_ON(1);
			break;
		}
		memcg = parent_mem_cgroup(memcg);
		if (!memcg)
			memcg = root_mem_cgroup;
	}
	return memcg;
}

6931 6932 6933 6934 6935 6936 6937 6938 6939
/**
 * mem_cgroup_swapout - transfer a memsw charge to swap
 * @page: page whose memsw charge to transfer
 * @entry: swap entry to move the charge to
 *
 * Transfer the memsw charge of @page to @entry.
 */
void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
6940
	struct mem_cgroup *memcg, *swap_memcg;
6941
	unsigned int nr_entries;
6942 6943 6944 6945 6946
	unsigned short oldid;

	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(page_count(page), page);

6947
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
6948 6949 6950 6951 6952 6953 6954 6955
		return;

	memcg = page->mem_cgroup;

	/* Readahead page, never charged */
	if (!memcg)
		return;

6956 6957 6958 6959 6960 6961
	/*
	 * In case the memcg owning these pages has been offlined and doesn't
	 * have an ID allocated to it anymore, charge the closest online
	 * ancestor for the swap instead and transfer the memory+swap charge.
	 */
	swap_memcg = mem_cgroup_id_get_online(memcg);
6962 6963 6964 6965 6966 6967
	nr_entries = hpage_nr_pages(page);
	/* Get references for the tail pages, too */
	if (nr_entries > 1)
		mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
				   nr_entries);
6968
	VM_BUG_ON_PAGE(oldid, page);
6969
	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
6970 6971 6972 6973

	page->mem_cgroup = NULL;

	if (!mem_cgroup_is_root(memcg))
6974
		page_counter_uncharge(&memcg->memory, nr_entries);
6975

6976
	if (!cgroup_memory_noswap && memcg != swap_memcg) {
6977
		if (!mem_cgroup_is_root(swap_memcg))
6978 6979
			page_counter_charge(&swap_memcg->memsw, nr_entries);
		page_counter_uncharge(&memcg->memsw, nr_entries);
6980 6981
	}

	/*
	 * Interrupts should be disabled here because the caller holds the
	 * i_pages lock which is taken with interrupts-off. It is
	 * important here to have the interrupts disabled because it is the
	 * only synchronisation we have for updating the per-CPU variables.
6987 6988
	 */
	VM_BUG_ON(!irqs_disabled());
6989
	mem_cgroup_charge_statistics(memcg, page, -nr_entries);
6990
	memcg_check_events(memcg, page);
6991 6992

	if (!mem_cgroup_is_root(memcg))
6993
		css_put_many(&memcg->css, nr_entries);
6994 6995
}

6996 6997
/**
 * mem_cgroup_try_charge_swap - try charging swap space for a page
6998 6999 7000
 * @page: page being added to swap
 * @entry: swap entry to charge
 *
7001
 * Try to charge @page's memcg for the swap space at @entry.
7002 7003 7004 7005 7006
 *
 * Returns 0 on success, -ENOMEM on failure.
 */
int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
{
7007
	unsigned int nr_pages = hpage_nr_pages(page);
7008
	struct page_counter *counter;
7009
	struct mem_cgroup *memcg;
7010 7011
	unsigned short oldid;

7012
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
7013 7014 7015 7016 7017 7018 7019 7020
		return 0;

	memcg = page->mem_cgroup;

	/* Readahead page, never charged */
	if (!memcg)
		return 0;

7021 7022
	if (!entry.val) {
		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7023
		return 0;
7024
	}
7025

7026 7027
	memcg = mem_cgroup_id_get_online(memcg);

7028
	if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg) &&
7029
	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
7030 7031
		memcg_memory_event(memcg, MEMCG_SWAP_MAX);
		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7032
		mem_cgroup_id_put(memcg);
7033
		return -ENOMEM;
7034
	}
7035

7036 7037 7038 7039
	/* Get references for the tail pages, too */
	if (nr_pages > 1)
		mem_cgroup_id_get_many(memcg, nr_pages - 1);
	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
7040
	VM_BUG_ON_PAGE(oldid, page);
7041
	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
7042 7043 7044 7045

	return 0;
}

7046
/**
7047
 * mem_cgroup_uncharge_swap - uncharge swap space
7048
 * @entry: swap entry to uncharge
7049
 * @nr_pages: the amount of swap space to uncharge
7050
 */
7051
void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
7052 7053 7054 7055
{
	struct mem_cgroup *memcg;
	unsigned short id;

7056
	id = swap_cgroup_record(entry, 0, nr_pages);
7057
	rcu_read_lock();
7058
	memcg = mem_cgroup_from_id(id);
7059
	if (memcg) {
7060
		if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg)) {
7061
			if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
7062
				page_counter_uncharge(&memcg->swap, nr_pages);
7063
			else
7064
				page_counter_uncharge(&memcg->memsw, nr_pages);
7065
		}
7066
		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
7067
		mem_cgroup_id_put_many(memcg, nr_pages);
7068 7069 7070 7071
	}
	rcu_read_unlock();
}

7072 7073 7074 7075
long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	long nr_swap_pages = get_nr_swap_pages();

7076
	if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
7077 7078 7079
		return nr_swap_pages;
	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
		nr_swap_pages = min_t(long, nr_swap_pages,
7080
				      READ_ONCE(memcg->swap.max) -
7081 7082 7083 7084
				      page_counter_read(&memcg->swap));
	return nr_swap_pages;
}

7085 7086 7087 7088 7089 7090 7091 7092
bool mem_cgroup_swap_full(struct page *page)
{
	struct mem_cgroup *memcg;

	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (vm_swap_full())
		return true;
7093
	if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
7094 7095 7096 7097 7098 7099
		return false;

	memcg = page->mem_cgroup;
	if (!memcg)
		return false;

7100 7101 7102 7103 7104
	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
		unsigned long usage = page_counter_read(&memcg->swap);

		if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
		    usage * 2 >= READ_ONCE(memcg->swap.max))
7105
			return true;
7106
	}
7107 7108 7109 7110

	return false;
}

static int __init setup_swap_account(char *s)
{
	if (!strcmp(s, "1"))
		cgroup_memory_noswap = 0;
	else if (!strcmp(s, "0"))
		cgroup_memory_noswap = 1;
	return 1;
}
__setup("swapaccount=", setup_swap_account);
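/*
 * Example (kernel command line, for illustration): "swapaccount=0"
 * makes the parser above set cgroup_memory_noswap, disabling the swap
 * controller; "swapaccount=1" keeps swap accounting enabled.
 */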

static u64 swap_current_read(struct cgroup_subsys_state *css,
			     struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
}

7129 7130 7131 7132 7133 7134 7135 7136 7137 7138 7139 7140 7141 7142 7143 7144 7145 7146 7147 7148 7149 7150 7151
static int swap_high_show(struct seq_file *m, void *v)
{
	return seq_puts_memcg_tunable(m,
		READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
}

static ssize_t swap_high_write(struct kernfs_open_file *of,
			       char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long high;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &high);
	if (err)
		return err;

	page_counter_set_high(&memcg->swap, high);

	return nbytes;
}

7152 7153
static int swap_max_show(struct seq_file *m, void *v)
{
7154 7155
	return seq_puts_memcg_tunable(m,
		READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
7156 7157 7158 7159 7160 7161 7162 7163 7164 7165 7166 7167 7168 7169
}

static ssize_t swap_max_write(struct kernfs_open_file *of,
			      char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long max;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &max);
	if (err)
		return err;

7170
	xchg(&memcg->swap.max, max);
7171 7172 7173 7174

	return nbytes;
}

7175 7176
static int swap_events_show(struct seq_file *m, void *v)
{
7177
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
7178

7179 7180
	seq_printf(m, "high %lu\n",
		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
7181 7182 7183 7184 7185 7186 7187 7188
	seq_printf(m, "max %lu\n",
		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
	seq_printf(m, "fail %lu\n",
		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));

	return 0;
}

7189 7190 7191 7192 7193 7194
static struct cftype swap_files[] = {
	{
		.name = "swap.current",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = swap_current_read,
	},
7195 7196 7197 7198 7199 7200
	{
		.name = "swap.high",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = swap_high_show,
		.write = swap_high_write,
	},
7201 7202 7203 7204 7205 7206
	{
		.name = "swap.max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = swap_max_show,
		.write = swap_max_write,
	},
7207 7208 7209 7210 7211 7212
	{
		.name = "swap.events",
		.flags = CFTYPE_NOT_ON_ROOT,
		.file_offset = offsetof(struct mem_cgroup, swap_events_file),
		.seq_show = swap_events_show,
	},
7213 7214 7215
	{ }	/* terminate */
};

7216
static struct cftype memsw_files[] = {
7217 7218 7219 7220 7221 7222 7223 7224 7225 7226 7227 7228 7229 7230 7231 7232 7233 7234 7235 7236 7237 7238 7239 7240 7241 7242 7243 7244
	{
		.name = "memsw.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.failcnt",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{ },	/* terminate */
};

static int __init mem_cgroup_swap_init(void)
{
7245 7246 7247 7248 7249
	/* No memory control -> no swap control */
	if (mem_cgroup_disabled())
		cgroup_memory_noswap = true;

	if (cgroup_memory_noswap)
7250 7251 7252 7253 7254
		return 0;

	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
	WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));

7255 7256 7257 7258 7259
	return 0;
}
subsys_initcall(mem_cgroup_swap_init);

#endif /* CONFIG_MEMCG_SWAP */