/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/mm_inline.h>
#include <linux/page_cgroup.h>
#include "internal.h"

#include <asm/uaccess.h>

struct cgroup_subsys mem_cgroup_subsys __read_mostly;
#define MEM_CGROUP_RECLAIM_RETRIES	5

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
/* Turned on only when memory cgroup is enabled && really_do_swap_account = 0 */
int do_swap_account __read_mostly;
static int really_do_swap_account __initdata = 1; /* to remember the boot option */
#else
#define do_swap_account		(0)
#endif


/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE, 	   /* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,	   /* # of pages charged as rss */
	MEM_CGROUP_STAT_PGPGIN_COUNT,	/* # of pages paged in */
	MEM_CGROUP_STAT_PGPGOUT_COUNT,	/* # of pages paged out */

	MEM_CGROUP_STAT_NSTATS,
};

struct mem_cgroup_stat_cpu {
	s64 count[MEM_CGROUP_STAT_NSTATS];
} ____cacheline_aligned_in_smp;

struct mem_cgroup_stat {
	struct mem_cgroup_stat_cpu cpustat[0];
};

/*
 * For accounting with irqs disabled; there is no need to bump the preempt count.
 */
static inline void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat_cpu *stat,
		enum mem_cgroup_stat_index idx, int val)
{
	stat->count[idx] += val;
}

static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
		enum mem_cgroup_stat_index idx)
{
	int cpu;
	s64 ret = 0;
	for_each_possible_cpu(cpu)
		ret += stat->cpustat[cpu].count[idx];
	return ret;
}

/*
 * per-zone information in memory controller.
 */
struct mem_cgroup_per_zone {
	/*
	 * Per-cgroup LRU lists, protected by the corresponding zone->lru_lock.
	 */
	struct list_head	lists[NR_LRU_LISTS];
	unsigned long		count[NR_LRU_LISTS];
};
/* Macro for accessing counter */
#define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

struct mem_cgroup_lru_info {
	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
};

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe we could even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;
	/*
	 * the counter to account for mem+swap usage.
	 */
	struct res_counter memsw;
	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
	 */
	struct mem_cgroup_lru_info info;

	int	prev_priority;	/* for recording reclaim priority */

	/*
	 * While reclaiming in a hierarchy, we cache the last child we
	 * reclaimed from. Protected by cgroup_lock()
	 */
	struct mem_cgroup *last_scanned_child;
	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;

	int		obsolete;
	atomic_t	refcnt;
	/*
	 * statistics. This must be placed at the end of memcg.
	 */
	struct mem_cgroup_stat stat;
};

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_MAPPED,
	MEM_CGROUP_CHARGE_TYPE_SHMEM,	/* used by page migration of shmem */
	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	NR_CHARGE_TYPE,
};

/* used only in this file (for readability) */
#define PCGF_CACHE	(1UL << PCG_CACHE)
#define PCGF_USED	(1UL << PCG_USED)
#define PCGF_LOCK	(1UL << PCG_LOCK)
static const unsigned long
pcg_default_flags[NR_CHARGE_TYPE] = {
	PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* File Cache */
	PCGF_USED | PCGF_LOCK, /* Anon */
	PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* Shmem */
	0, /* FORCE */
};


/* for encoding cft->private value on file */
#define _MEM			(0)
#define _MEMSWAP		(1)
#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_TYPE(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)

static void mem_cgroup_get(struct mem_cgroup *mem);
static void mem_cgroup_put(struct mem_cgroup *mem);

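/*
 * Update this memcg's per-cpu statistics for @pc: adjust the cache or rss
 * counter by +1/-1 and count the event as a pgpgin or pgpgout.
 */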
static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
					 struct page_cgroup *pc,
					 bool charge)
{
	int val = (charge)? 1 : -1;
	struct mem_cgroup_stat *stat = &mem->stat;
	struct mem_cgroup_stat_cpu *cpustat;
	int cpu = get_cpu();

	cpustat = &stat->cpustat[cpu];
	if (PageCgroupCache(pc))
		__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val);
	else
		__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_RSS, val);

	if (charge)
		__mem_cgroup_stat_add_safe(cpustat,
				MEM_CGROUP_STAT_PGPGIN_COUNT, 1);
	else
		__mem_cgroup_stat_add_safe(cpustat,
				MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
	put_cpu();
}

static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
{
	return &mem->info.nodeinfo[nid]->zoneinfo[zid];
}

static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct page_cgroup *pc)
{
	struct mem_cgroup *mem = pc->mem_cgroup;
	int nid = page_cgroup_nid(pc);
	int zid = page_cgroup_zid(pc);

	return mem_cgroup_zoneinfo(mem, nid, zid);
}

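/* Sum the LRU statistic @idx over all online nodes and zones of this memcg. */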
static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
					enum lru_list idx)
{
	int nid, zid;
	struct mem_cgroup_per_zone *mz;
	u64 total = 0;

	for_each_online_node(nid)
		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
			mz = mem_cgroup_zoneinfo(mem, nid, zid);
			total += MEM_CGROUP_ZSTAT(mz, idx);
		}
	return total;
}

static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
	return container_of(cgroup_subsys_state(cont,
				mem_cgroup_subsys_id), struct mem_cgroup,
				css);
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
				struct mem_cgroup, css);
}

/*
 * The following LRU functions are allowed to be used without PCG_LOCK.
 * Operations are called by the global LRU routines independently of memcg.
 * What we have to take care of here is the validity of pc->mem_cgroup.
 *
 * Changes to pc->mem_cgroup happens when
 * 1. charge
 * 2. moving account
 * In typical case, "charge" is done before add-to-lru. Exception is SwapCache.
 * It is added to LRU before charge.
 * If PCG_USED bit is not set, page_cgroup is not added to this private LRU.
 * When moving account, the page is not on LRU. It's isolated.
 */

void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_disabled())
		return;
	pc = lookup_page_cgroup(page);
	/* can happen while we handle swapcache. */
	if (list_empty(&pc->lru))
		return;
	mz = page_cgroup_zoneinfo(pc);
	mem = pc->mem_cgroup;
	MEM_CGROUP_ZSTAT(mz, lru) -= 1;
	list_del_init(&pc->lru);
	return;
}

void mem_cgroup_del_lru(struct page *page)
{
	mem_cgroup_del_lru_list(page, page_lru(page));
}

void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
{
	struct mem_cgroup_per_zone *mz;
	struct page_cgroup *pc;

	if (mem_cgroup_disabled())
		return;

	pc = lookup_page_cgroup(page);
	smp_rmb();
	/* unused page is not rotated. */
	if (!PageCgroupUsed(pc))
		return;
	mz = page_cgroup_zoneinfo(pc);
	list_move(&pc->lru, &mz->lists[lru]);
}

void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_disabled())
		return;
	pc = lookup_page_cgroup(page);
	/* barrier to sync with "charge" */
	smp_rmb();
	if (!PageCgroupUsed(pc))
		return;

	mz = page_cgroup_zoneinfo(pc);
	MEM_CGROUP_ZSTAT(mz, lru) += 1;
	list_add(&pc->lru, &mz->lists[lru]);
}
/*
 * To add swapcache into the LRU. Be careful when calling this function:
 * zone->lru_lock must not be held and irqs must not be disabled.
 */
static void mem_cgroup_lru_fixup(struct page *page)
{
	if (!isolate_lru_page(page))
		putback_lru_page(page);
}

void mem_cgroup_move_lists(struct page *page,
			   enum lru_list from, enum lru_list to)
{
	if (mem_cgroup_disabled())
		return;
	mem_cgroup_del_lru_list(page, from);
	mem_cgroup_add_lru_list(page, to);
}

int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
{
	int ret;

	task_lock(task);
	ret = task->mm && mm_match_cgroup(task->mm, mem);
	task_unlock(task);
	return ret;
}

/*
 * Calculate the mapped ratio under the memory controller. This will be used in
 * vmscan.c for determining whether we have to reclaim mapped pages.
 */
int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
{
	long total, rss;

	/*
	 * usage is recorded in bytes. But, here, we assume the number of
	 * physical pages can be represented by "long" on any arch.
	 */
	total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
	rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
	return (int)((rss * 100L) / total);
}

/*
 * prev_priority control...this will be used in memory reclaim path.
 */
int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	return mem->prev_priority;
}

void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
{
	if (priority < mem->prev_priority)
		mem->prev_priority = priority;
}

void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
{
	mem->prev_priority = priority;
}

/*
 * Calculate # of pages to be scanned in this priority/zone.
 * See also vmscan.c
 *
 * priority starts from "DEF_PRIORITY" and decremented in each loop.
 * (see include/linux/mmzone.h)
 */

long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
					int priority, enum lru_list lru)
{
	long nr_pages;
	int nid = zone->zone_pgdat->node_id;
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);

	nr_pages = MEM_CGROUP_ZSTAT(mz, lru);

	return (nr_pages >> priority);
}

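/*
 * Isolate up to @nr_to_scan pages from this memcg's per-zone LRU list
 * (selected by @active and @file) onto @dst; memcg-aware counterpart of
 * the global LRU isolation done in vmscan.
 */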
unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active, int file)
{
	unsigned long nr_taken = 0;
	struct page *page;
	unsigned long scan;
	LIST_HEAD(pc_list);
	struct list_head *src;
	struct page_cgroup *pc, *tmp;
	int nid = z->zone_pgdat->node_id;
	int zid = zone_idx(z);
	struct mem_cgroup_per_zone *mz;
	int lru = LRU_FILE * !!file + !!active;

	BUG_ON(!mem_cont);
	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
	src = &mz->lists[lru];

	scan = 0;
	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
		if (scan >= nr_to_scan)
			break;

		page = pc->page;
		if (unlikely(!PageCgroupUsed(pc)))
			continue;
		if (unlikely(!PageLRU(page)))
			continue;

		scan++;
		if (__isolate_lru_page(page, mode, file) == 0) {
			list_move(&page->lru, dst);
			nr_taken++;
		}
	}

	*scanned = scan;
	return nr_taken;
}

#define mem_cgroup_from_res_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

/*
 * This routine finds the DFS walk successor. This routine should be
 * called with cgroup_mutex held
 */
static struct mem_cgroup *
mem_cgroup_get_next_node(struct mem_cgroup *curr, struct mem_cgroup *root_mem)
{
	struct cgroup *cgroup, *curr_cgroup, *root_cgroup;

	curr_cgroup = curr->css.cgroup;
	root_cgroup = root_mem->css.cgroup;

	if (!list_empty(&curr_cgroup->children)) {
		/*
		 * Walk down to children
		 */
		mem_cgroup_put(curr);
		cgroup = list_entry(curr_cgroup->children.next,
						struct cgroup, sibling);
		curr = mem_cgroup_from_cont(cgroup);
		mem_cgroup_get(curr);
		goto done;
	}

visit_parent:
	if (curr_cgroup == root_cgroup) {
		mem_cgroup_put(curr);
		curr = root_mem;
		mem_cgroup_get(curr);
		goto done;
	}

	/*
	 * Goto next sibling
	 */
	if (curr_cgroup->sibling.next != &curr_cgroup->parent->children) {
		mem_cgroup_put(curr);
		cgroup = list_entry(curr_cgroup->sibling.next, struct cgroup,
						sibling);
		curr = mem_cgroup_from_cont(cgroup);
		mem_cgroup_get(curr);
		goto done;
	}

	/*
	 * Go up to next parent and next parent's sibling if need be
	 */
	curr_cgroup = curr_cgroup->parent;
	goto visit_parent;

done:
	root_mem->last_scanned_child = curr;
	return curr;
}

/*
 * Visit the first child (need not be the first child as per the ordering
 * of the cgroup list, since we track last_scanned_child) of @root_mem and use
 * that to reclaim free pages from.
 */
static struct mem_cgroup *
mem_cgroup_get_first_node(struct mem_cgroup *root_mem)
{
	struct cgroup *cgroup;
	struct mem_cgroup *ret;
	bool obsolete = (root_mem->last_scanned_child &&
				root_mem->last_scanned_child->obsolete);

	/*
	 * Scan all children under the mem_cgroup mem
	 */
	cgroup_lock();
	if (list_empty(&root_mem->css.cgroup->children)) {
		ret = root_mem;
		goto done;
	}

	if (!root_mem->last_scanned_child || obsolete) {

		if (obsolete)
			mem_cgroup_put(root_mem->last_scanned_child);

		cgroup = list_first_entry(&root_mem->css.cgroup->children,
				struct cgroup, sibling);
		ret = mem_cgroup_from_cont(cgroup);
		mem_cgroup_get(ret);
	} else
		ret = mem_cgroup_get_next_node(root_mem->last_scanned_child,
						root_mem);

done:
	root_mem->last_scanned_child = ret;
	cgroup_unlock();
	return ret;
}

/*
 * Dance down the hierarchy if needed to reclaim memory. We remember the
 * last child we reclaimed from, so that we don't end up penalizing
 * one child extensively based on its position in the children list.
 *
 * root_mem is the original ancestor that we've been reclaiming from.
 */
static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
						gfp_t gfp_mask, bool noswap)
{
	struct mem_cgroup *next_mem;
	int ret = 0;

	/*
	 * Reclaim unconditionally and don't check for return value.
	 * We need to reclaim in the current group and down the tree.
	 * One might think about checking for children before reclaiming,
	 * but there might be left over accounting, even after children
	 * have left.
	 */
	ret = try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap);
	if (res_counter_check_under_limit(&root_mem->res))
		return 0;

	next_mem = mem_cgroup_get_first_node(root_mem);

	while (next_mem != root_mem) {
		if (next_mem->obsolete) {
			mem_cgroup_put(next_mem);
			cgroup_lock();
			next_mem = mem_cgroup_get_first_node(root_mem);
			cgroup_unlock();
			continue;
		}
		ret = try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap);
		if (res_counter_check_under_limit(&root_mem->res))
			return 0;
		cgroup_lock();
		next_mem = mem_cgroup_get_next_node(next_mem, root_mem);
		cgroup_unlock();
	}
	return ret;
}

/*
 * Unlike the exported interface, an "oom" parameter is added. If oom == true,
 * the OOM killer can be invoked.
 */
static int __mem_cgroup_try_charge(struct mm_struct *mm,
			gfp_t gfp_mask, struct mem_cgroup **memcg,
			bool oom)
{
	struct mem_cgroup *mem, *mem_over_limit;
	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct res_counter *fail_res;
	/*
	 * We always charge the cgroup the mm_struct belongs to.
	 * The mm_struct's mem_cgroup changes on task migration if the
	 * thread group leader migrates. It's possible that mm is not
	 * set, if so charge the init_mm (happens for pagecache usage).
	 */
	if (likely(!*memcg)) {
		rcu_read_lock();
		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
		if (unlikely(!mem)) {
			rcu_read_unlock();
			return 0;
		}
		/*
		 * For every charge from the cgroup, increment reference count
		 */
		css_get(&mem->css);
		*memcg = mem;
		rcu_read_unlock();
	} else {
		mem = *memcg;
		css_get(&mem->css);
	}

	while (1) {
		int ret;
		bool noswap = false;

		ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
		if (likely(!ret)) {
			if (!do_swap_account)
				break;
			ret = res_counter_charge(&mem->memsw, PAGE_SIZE,
							&fail_res);
			if (likely(!ret))
				break;
			/* mem+swap counter fails */
			res_counter_uncharge(&mem->res, PAGE_SIZE);
			noswap = true;
			mem_over_limit = mem_cgroup_from_res_counter(fail_res,
									memsw);
		} else
			/* mem counter fails */
			mem_over_limit = mem_cgroup_from_res_counter(fail_res,
									res);

		if (!(gfp_mask & __GFP_WAIT))
			goto nomem;

		ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
							noswap);

		/*
		 * try_to_free_mem_cgroup_pages() might not give us a full
		 * picture of reclaim. Some pages are reclaimed and might be
		 * moved to swap cache or just unmapped from the cgroup.
		 * Check the limit again to see if the reclaim reduced the
		 * current usage of the cgroup before giving up
		 *
		 */
		if (!do_swap_account &&
			res_counter_check_under_limit(&mem->res))
			continue;
		if (do_swap_account &&
			res_counter_check_under_limit(&mem->memsw))
			continue;

		if (!nr_retries--) {
			if (oom)
				mem_cgroup_out_of_memory(mem, gfp_mask);
			goto nomem;
		}
	}
	return 0;
nomem:
	css_put(&mem->css);
	return -ENOMEM;
}

/**
 * mem_cgroup_try_charge - get charge of PAGE_SIZE.
 * @mm: an mm_struct which is charged against. (when *memcg is NULL)
 * @gfp_mask: gfp_mask for reclaim.
 * @memcg: a pointer to memory cgroup which is charged against.
 *
 * Charge against the memory cgroup pointed to by *memcg. If *memcg == NULL,
 * the memory cgroup is looked up from @mm and stored in *memcg.
 *
 * Returns 0 on success, -ENOMEM on failure.
 * This call can invoke the OOM killer.
 */

int mem_cgroup_try_charge(struct mm_struct *mm,
			  gfp_t mask, struct mem_cgroup **memcg)
{
	return __mem_cgroup_try_charge(mm, mask, memcg, true);
}

/*
 * Commit a charge obtained by mem_cgroup_try_charge() and set the page_cgroup
 * to the USED state. If it is already USED, uncharge and return.
 */

static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
				     struct page_cgroup *pc,
				     enum charge_type ctype)
{
	/* try_charge() can return NULL to *memcg, taking care of it. */
	if (!mem)
		return;

	lock_page_cgroup(pc);
	if (unlikely(PageCgroupUsed(pc))) {
		unlock_page_cgroup(pc);
		res_counter_uncharge(&mem->res, PAGE_SIZE);
		if (do_swap_account)
			res_counter_uncharge(&mem->memsw, PAGE_SIZE);
		css_put(&mem->css);
		return;
	}
	pc->mem_cgroup = mem;
	smp_wmb();
	pc->flags = pcg_default_flags[ctype];

	mem_cgroup_charge_statistics(mem, pc, true);

	unlock_page_cgroup(pc);
}

/**
 * mem_cgroup_move_account - move account of the page
 * @pc:	page_cgroup of the page.
 * @from: mem_cgroup which the page is moved from.
 * @to:	mem_cgroup which the page is moved to. @from != @to.
 *
 * The caller must confirm the following:
 * - page is not on LRU (isolate_page() is useful.)
 *
 * returns 0 at success,
 * returns -EBUSY when lock is busy or "pc" is unstable.
 *
 * This function does "uncharge" from old cgroup but doesn't do "charge" to
 * new cgroup. It should be done by a caller.
 */

static int mem_cgroup_move_account(struct page_cgroup *pc,
	struct mem_cgroup *from, struct mem_cgroup *to)
{
	struct mem_cgroup_per_zone *from_mz, *to_mz;
	int nid, zid;
	int ret = -EBUSY;

	VM_BUG_ON(from == to);
	VM_BUG_ON(PageLRU(pc->page));

	nid = page_cgroup_nid(pc);
	zid = page_cgroup_zid(pc);
	from_mz =  mem_cgroup_zoneinfo(from, nid, zid);
	to_mz =  mem_cgroup_zoneinfo(to, nid, zid);

	if (!trylock_page_cgroup(pc))
		return ret;

	if (!PageCgroupUsed(pc))
		goto out;

	if (pc->mem_cgroup != from)
		goto out;

	css_put(&from->css);
	res_counter_uncharge(&from->res, PAGE_SIZE);
	mem_cgroup_charge_statistics(from, pc, false);
	if (do_swap_account)
		res_counter_uncharge(&from->memsw, PAGE_SIZE);
	pc->mem_cgroup = to;
	mem_cgroup_charge_statistics(to, pc, true);
	css_get(&to->css);
	ret = 0;
out:
	unlock_page_cgroup(pc);
	return ret;
}

/*
 * move charges to its parent.
 */

static int mem_cgroup_move_parent(struct page_cgroup *pc,
				  struct mem_cgroup *child,
				  gfp_t gfp_mask)
{
	struct page *page = pc->page;
	struct cgroup *cg = child->css.cgroup;
	struct cgroup *pcg = cg->parent;
	struct mem_cgroup *parent;
	int ret;

	/* Is ROOT ? */
	if (!pcg)
		return -EINVAL;


	parent = mem_cgroup_from_cont(pcg);


	ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
	if (ret)
		return ret;

	if (!get_page_unless_zero(page))
		return -EBUSY;

	ret = isolate_lru_page(page);

	if (ret)
		goto cancel;

	ret = mem_cgroup_move_account(pc, child, parent);

	/* drop the extra refcnt taken by try_charge() (move_account incremented one) */
	css_put(&parent->css);
	putback_lru_page(page);
	if (!ret) {
		put_page(page);
		return 0;
	}
	/* uncharge if move fails */
cancel:
	res_counter_uncharge(&parent->res, PAGE_SIZE);
	if (do_swap_account)
		res_counter_uncharge(&parent->memsw, PAGE_SIZE);
	put_page(page);
	return ret;
}

/*
 * Charge the memory controller for page usage.
 * Return
 * 0 if the charge was successful
 * < 0 if the cgroup is over its limit
 */
static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask, enum charge_type ctype,
				struct mem_cgroup *memcg)
{
	struct mem_cgroup *mem;
	struct page_cgroup *pc;
	int ret;

	pc = lookup_page_cgroup(page);
	/* can happen at boot */
	if (unlikely(!pc))
		return 0;
	prefetchw(pc);

	mem = memcg;
	ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
	if (ret)
		return ret;

	__mem_cgroup_commit_charge(mem, pc, ctype);
	return 0;
}

int mem_cgroup_newpage_charge(struct page *page,
			      struct mm_struct *mm, gfp_t gfp_mask)
{
	if (mem_cgroup_disabled())
		return 0;
	if (PageCompound(page))
		return 0;
	/*
	 * If already mapped, we don't have to account.
	 * If page cache, page->mapping has address_space.
	 * But page->mapping may hold a stale anon_vma pointer;
	 * detect that with a PageAnon() check. A newly-mapped anon page's
	 * page->mapping is NULL.
	 */
	if (page_mapped(page) || (page->mapping && !PageAnon(page)))
		return 0;
	if (unlikely(!mm))
		mm = &init_mm;
	return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
}

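/* Charge a page-cache (file or shmem) page to the memcg of @mm. */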
int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask)
{
	if (mem_cgroup_disabled())
		return 0;
	if (PageCompound(page))
		return 0;
	/*
	 * Corner case handling. This is called from add_to_page_cache()
	 * in usual. But some FS (shmem) precharges this page before calling it
	 * and call add_to_page_cache() with GFP_NOWAIT.
	 *
	 * For GFP_NOWAIT case, the page may be pre-charged before calling
	 * add_to_page_cache(). (See shmem.c) check it here and avoid to call
	 * charge twice. (It works but has to pay a bit larger cost.)
	 */
	if (!(gfp_mask & __GFP_WAIT)) {
		struct page_cgroup *pc;


		pc = lookup_page_cgroup(page);
		if (!pc)
			return 0;
		lock_page_cgroup(pc);
		if (PageCgroupUsed(pc)) {
			unlock_page_cgroup(pc);
941 942
			return 0;
		}
943
		unlock_page_cgroup(pc);
944 945
	}

	if (unlikely(!mm))
		mm = &init_mm;

	if (page_is_file_cache(page))
		return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
	else
		return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL);
}

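/*
 * Charge a page being swapped in: prefer the memcg recorded in swap_cgroup
 * at swap-out time, and fall back to the memcg of the faulting mm.
 */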
int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
				 struct page *page,
				 gfp_t mask, struct mem_cgroup **ptr)
{
	struct mem_cgroup *mem;
	swp_entry_t     ent;

	if (mem_cgroup_disabled())
		return 0;

	if (!do_swap_account)
		goto charge_cur_mm;

	/*
	 * A racing thread's fault, or swapoff, may have already updated
	 * the pte, and even removed page from swap cache: return success
	 * to go on to do_swap_page()'s pte_same() test, which should fail.
	 */
	if (!PageSwapCache(page))
		return 0;

	ent.val = page_private(page);

	mem = lookup_swap_cgroup(ent);
	if (!mem || mem->obsolete)
		goto charge_cur_mm;
	*ptr = mem;
	return __mem_cgroup_try_charge(NULL, mask, ptr, true);
charge_cur_mm:
	if (unlikely(!mm))
		mm = &init_mm;
	return __mem_cgroup_try_charge(mm, mask, ptr, true);
}

#ifdef CONFIG_SWAP

int mem_cgroup_cache_charge_swapin(struct page *page,
			struct mm_struct *mm, gfp_t mask, bool locked)
{
	int ret = 0;

	if (mem_cgroup_disabled())
		return 0;
	if (unlikely(!mm))
		mm = &init_mm;
	if (!locked)
		lock_page(page);
	/*
	 * If not locked, the page can be dropped from the SwapCache before
	 * we reach here.
	 */
	if (PageSwapCache(page)) {
		struct mem_cgroup *mem = NULL;
		swp_entry_t ent;

		ent.val = page_private(page);
		if (do_swap_account) {
			mem = lookup_swap_cgroup(ent);
			if (mem && mem->obsolete)
				mem = NULL;
			if (mem)
				mm = NULL;
		}
		ret = mem_cgroup_charge_common(page, mm, mask,
				MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);

		if (!ret && do_swap_account) {
			/* avoid double counting */
			mem = swap_cgroup_record(ent, NULL);
			if (mem) {
				res_counter_uncharge(&mem->memsw, PAGE_SIZE);
				mem_cgroup_put(mem);
			}
		}
	}
	if (!locked)
		unlock_page(page);
	/* add this page(page_cgroup) to the LRU we want. */
	mem_cgroup_lru_fixup(page);

	return ret;
}
#endif

void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
{
	struct page_cgroup *pc;

	if (mem_cgroup_disabled())
		return;
	if (!ptr)
		return;
	pc = lookup_page_cgroup(page);
	__mem_cgroup_commit_charge(ptr, pc, MEM_CGROUP_CHARGE_TYPE_MAPPED);
	/*
	 * Now the swap entry is backed by a page in memory. This means the page
	 * may be counted both as mem and as swap (a double count).
	 * Fix it by uncharging from memsw. This SwapCache page is stable
	 * because we're still under lock_page().
	 */
	if (do_swap_account) {
		swp_entry_t ent = {.val = page_private(page)};
		struct mem_cgroup *memcg;
		memcg = swap_cgroup_record(ent, NULL);
		if (memcg) {
			/* If memcg is obsolete, memcg can be != ptr */
			res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
			mem_cgroup_put(memcg);
		}

	}
	/* add this page(page_cgroup) to the LRU we want. */
	mem_cgroup_lru_fixup(page);
}

void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
{
	if (mem_cgroup_disabled())
		return;
	if (!mem)
		return;
	res_counter_uncharge(&mem->res, PAGE_SIZE);
	if (do_swap_account)
		res_counter_uncharge(&mem->memsw, PAGE_SIZE);
	css_put(&mem->css);
}


/*
 * uncharge if !page_mapped(page)
 */
static struct mem_cgroup *
__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem = NULL;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_disabled())
		return NULL;

	if (PageSwapCache(page))
		return NULL;

	/*
	 * Check if our page_cgroup is valid
	 */
	pc = lookup_page_cgroup(page);
	if (unlikely(!pc || !PageCgroupUsed(pc)))
		return NULL;

	lock_page_cgroup(pc);

	mem = pc->mem_cgroup;

	if (!PageCgroupUsed(pc))
		goto unlock_out;

	switch (ctype) {
	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
		if (page_mapped(page))
			goto unlock_out;
		break;
	case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
		if (!PageAnon(page)) {	/* Shared memory */
			if (page->mapping && !page_is_file_cache(page))
				goto unlock_out;
		} else if (page_mapped(page)) /* Anon */
				goto unlock_out;
		break;
	default:
		break;
	}

	res_counter_uncharge(&mem->res, PAGE_SIZE);
	if (do_swap_account && (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
		res_counter_uncharge(&mem->memsw, PAGE_SIZE);

	mem_cgroup_charge_statistics(mem, pc, false);
	ClearPageCgroupUsed(pc);

	mz = page_cgroup_zoneinfo(pc);
	unlock_page_cgroup(pc);

	css_put(&mem->css);

	return mem;

unlock_out:
	unlock_page_cgroup(pc);
	return NULL;
}

void mem_cgroup_uncharge_page(struct page *page)
{
	/* early check. */
	if (page_mapped(page))
		return;
	if (page->mapping && !PageAnon(page))
		return;
	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
}

void mem_cgroup_uncharge_cache_page(struct page *page)
{
	VM_BUG_ON(page_mapped(page));
	VM_BUG_ON(page->mapping);
	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
}

/*
 * called from __delete_from_swap_cache() and drop "page" account.
 * memcg information is recorded to swap_cgroup of "ent"
 */
void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
{
	struct mem_cgroup *memcg;

	memcg = __mem_cgroup_uncharge_common(page,
					MEM_CGROUP_CHARGE_TYPE_SWAPOUT);
	/* record memcg information */
	if (do_swap_account && memcg) {
		swap_cgroup_record(ent, memcg);
		mem_cgroup_get(memcg);
	}
}

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
/*
 * called from swap_entry_free(). remove record in swap_cgroup and
 * uncharge "memsw" account.
 */
void mem_cgroup_uncharge_swap(swp_entry_t ent)
{
	struct mem_cgroup *memcg;

	if (!do_swap_account)
		return;

	memcg = swap_cgroup_record(ent, NULL);
	if (memcg) {
		res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
		mem_cgroup_put(memcg);
	}
}
#endif

/*
 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
 * page belongs to.
 */
int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem = NULL;
	int ret = 0;

	if (mem_cgroup_disabled())
		return 0;

	pc = lookup_page_cgroup(page);
	lock_page_cgroup(pc);
	if (PageCgroupUsed(pc)) {
		mem = pc->mem_cgroup;
		css_get(&mem->css);
	}
	unlock_page_cgroup(pc);

	if (mem) {
		ret = mem_cgroup_try_charge(NULL, GFP_HIGHUSER_MOVABLE, &mem);
		css_put(&mem->css);
	}
	*ptr = mem;
	return ret;
}

/* remove redundant charge if migration failed */
void mem_cgroup_end_migration(struct mem_cgroup *mem,
		struct page *oldpage, struct page *newpage)
{
	struct page *target, *unused;
	struct page_cgroup *pc;
	enum charge_type ctype;

	if (!mem)
		return;

	/* at migration success, oldpage->mapping is NULL. */
	if (oldpage->mapping) {
		target = oldpage;
		unused = NULL;
	} else {
		target = newpage;
		unused = oldpage;
	}

	if (PageAnon(target))
		ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
	else if (page_is_file_cache(target))
		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
	else
		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;

	/* unused page is not on radix-tree now. */
	if (unused)
		__mem_cgroup_uncharge_common(unused, ctype);

	pc = lookup_page_cgroup(target);
	/*
	 * __mem_cgroup_commit_charge() checks the PCG_USED bit of the page_cgroup.
	 * So, double-counting is effectively avoided.
	 */
	__mem_cgroup_commit_charge(mem, pc, ctype);

	/*
	 * Both of oldpage and newpage are still under lock_page().
	 * Then, we don't have to care about race in radix-tree.
	 * But we have to be careful that this page is unmapped or not.
	 *
	 * There is a case for !page_mapped(). At the start of
	 * migration, oldpage was mapped. But now, it's zapped.
	 * But we know *target* page is not freed/reused under us.
	 * mem_cgroup_uncharge_page() does all necessary checks.
1280
	 */
1281 1282
	if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
		mem_cgroup_uncharge_page(target);
1283
}
1284

1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295
/*
 * A call to try to shrink memory usage under specified resource controller.
 * This is typically used for page reclaiming for shmem for reducing side
 * effect of page allocation from shmem, which is used by some mem_cgroup.
 */
int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
{
	struct mem_cgroup *mem;
	int progress = 0;
	int retry = MEM_CGROUP_RECLAIM_RETRIES;

	if (mem_cgroup_disabled())
		return 0;
	if (!mm)
		return 0;

	rcu_read_lock();
	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (unlikely(!mem)) {
		rcu_read_unlock();
		return 0;
	}
	css_get(&mem->css);
	rcu_read_unlock();

	do {
		progress = try_to_free_mem_cgroup_pages(mem, gfp_mask, true);
		progress += res_counter_check_under_limit(&mem->res);
	} while (!progress && --retry);

	css_put(&mem->css);
	if (!retry)
		return -ENOMEM;
	return 0;
}

static DEFINE_MUTEX(set_limit_mutex);

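/*
 * Try to set a new memory limit, reclaiming pages from the group (with a
 * bounded number of retries) until the new limit fits; also enforces
 * mem->res.limit <= mem->memsw.limit.
 */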
static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
				unsigned long long val)
{

	int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
	int progress;
	u64 memswlimit;
	int ret = 0;

	while (retry_count) {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		/*
		 * Rather than hide all in some function, I do this in
		 * open coded manner. You see what this really does.
		 * We have to guarantee mem->res.limit < mem->memsw.limit.
		 */
		mutex_lock(&set_limit_mutex);
		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
		if (memswlimit < val) {
			ret = -EINVAL;
			mutex_unlock(&set_limit_mutex);
			break;
		}
		ret = res_counter_set_limit(&memcg->res, val);
		mutex_unlock(&set_limit_mutex);

		if (!ret)
			break;

		progress = try_to_free_mem_cgroup_pages(memcg,
				GFP_HIGHUSER_MOVABLE, false);
		if (!progress)
			retry_count--;
	}
	return ret;
}

int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
				unsigned long long val)
{
	int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
	u64 memlimit, oldusage, curusage;
	int ret;

	if (!do_swap_account)
		return -EINVAL;

	while (retry_count) {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		/*
		 * Rather than hide all in some function, I do this in
		 * open coded manner. You see what this really does.
		 * We have to guarantee mem->res.limit < mem->memsw.limit.
		 */
		mutex_lock(&set_limit_mutex);
		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
		if (memlimit > val) {
			ret = -EINVAL;
			mutex_unlock(&set_limit_mutex);
			break;
		}
		ret = res_counter_set_limit(&memcg->memsw, val);
		mutex_unlock(&set_limit_mutex);

		if (!ret)
			break;

		oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
		try_to_free_mem_cgroup_pages(memcg, GFP_HIGHUSER_MOVABLE, true);
		curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
		if (curusage >= oldusage)
			retry_count--;
	}
	return ret;
}

/*
 * This routine traverses the page_cgroups on the given list and drops them all.
 * *And* this routine doesn't reclaim the pages themselves, it just removes the page_cgroups.
 */
static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
				int node, int zid, enum lru_list lru)
{
	struct zone *zone;
	struct mem_cgroup_per_zone *mz;
	struct page_cgroup *pc, *busy;
	unsigned long flags, loop;
	struct list_head *list;
	int ret = 0;

	zone = &NODE_DATA(node)->node_zones[zid];
	mz = mem_cgroup_zoneinfo(mem, node, zid);
	list = &mz->lists[lru];

	loop = MEM_CGROUP_ZSTAT(mz, lru);
	/* give some margin against EBUSY etc...*/
	loop += 256;
	busy = NULL;
	while (loop--) {
		ret = 0;
		spin_lock_irqsave(&zone->lru_lock, flags);
		if (list_empty(list)) {
			spin_unlock_irqrestore(&zone->lru_lock, flags);
			break;
		}
		pc = list_entry(list->prev, struct page_cgroup, lru);
		if (busy == pc) {
			list_move(&pc->lru, list);
			busy = NULL;
			spin_unlock_irqrestore(&zone->lru_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&zone->lru_lock, flags);

		ret = mem_cgroup_move_parent(pc, mem, GFP_HIGHUSER_MOVABLE);
		if (ret == -ENOMEM)
			break;

		if (ret == -EBUSY || ret == -EINVAL) {
			/* found lock contention or "pc" is obsolete. */
			busy = pc;
			cond_resched();
		} else
			busy = NULL;
	}

	if (!ret && !list_empty(list))
		return -EBUSY;
	return ret;
}

/*
 * make mem_cgroup's charge to be 0 if there is no task.
 * This enables deleting this mem_cgroup.
 */
static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
{
	int ret;
	int node, zid, shrink;
	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct cgroup *cgrp = mem->css.cgroup;

	css_get(&mem->css);

	shrink = 0;
	/* should free all ? */
	if (free_all)
		goto try_to_free;
move_account:
	while (mem->res.usage > 0) {
		ret = -EBUSY;
		if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
			goto out;
		ret = -EINTR;
		if (signal_pending(current))
			goto out;
		/* This is for making all *used* pages to be on LRU. */
		lru_add_drain_all();
		ret = 0;
		for_each_node_state(node, N_POSSIBLE) {
			for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
				enum lru_list l;
				for_each_lru(l) {
					ret = mem_cgroup_force_empty_list(mem,
							node, zid, l);
					if (ret)
						break;
				}
			}
			if (ret)
				break;
		}
		/* it seems parent cgroup doesn't have enough mem */
		if (ret == -ENOMEM)
			goto try_to_free;
		cond_resched();
	}
	ret = 0;
out:
	css_put(&mem->css);
	return ret;

try_to_free:
	/* returns EBUSY if there is a task or if we come here twice. */
	if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
		ret = -EBUSY;
		goto out;
	}
	/* we call try-to-free pages for make this cgroup empty */
	lru_add_drain_all();
	/* try to free all pages in this cgroup */
	shrink = 1;
	while (nr_retries && mem->res.usage > 0) {
		int progress;

		if (signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		progress = try_to_free_mem_cgroup_pages(mem,
						  GFP_HIGHUSER_MOVABLE, false);
		if (!progress) {
			nr_retries--;
			/* maybe some writeback is necessary */
			congestion_wait(WRITE, HZ/10);
		}

	}
	lru_add_drain();
	/* try move_account...there may be some *locked* pages. */
	if (mem->res.usage)
		goto move_account;
	ret = 0;
	goto out;
}

int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
{
	return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
}


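/* Read/write handlers for the memory.use_hierarchy control file. */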
static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
{
	return mem_cgroup_from_cont(cont)->use_hierarchy;
}

static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
					u64 val)
{
	int retval = 0;
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
	struct cgroup *parent = cont->parent;
	struct mem_cgroup *parent_mem = NULL;

	if (parent)
		parent_mem = mem_cgroup_from_cont(parent);

	cgroup_lock();
	/*
	 * If the parent's use_hierarchy is set, we can't make any modifications
	 * in the child subtrees. If it is unset, then the change can
	 * occur, provided the current cgroup has no children.
	 *
	 * For the root cgroup, parent_mem is NULL, we allow value to be
	 * set if there are no children.
	 */
	if ((!parent_mem || !parent_mem->use_hierarchy) &&
				(val == 1 || val == 0)) {
		if (list_empty(&cont->children))
			mem->use_hierarchy = val;
		else
			retval = -EBUSY;
	} else
		retval = -EINVAL;
	cgroup_unlock();

	return retval;
}

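/* Read handler shared by the memory.* and memory.memsw.* counter files. */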
static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
	u64 val = 0;
	int type, name;

	type = MEMFILE_TYPE(cft->private);
	name = MEMFILE_ATTR(cft->private);
	switch (type) {
	case _MEM:
		val = res_counter_read_u64(&mem->res, name);
		break;
	case _MEMSWAP:
		if (do_swap_account)
			val = res_counter_read_u64(&mem->memsw, name);
		break;
	default:
		BUG();
		break;
	}
	return val;
}
/*
 * The user of this function is...
 * RES_LIMIT.
 */
static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
			    const char *buffer)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
	int type, name;
	unsigned long long val;
	int ret;

	type = MEMFILE_TYPE(cft->private);
	name = MEMFILE_ATTR(cft->private);
	switch (name) {
	case RES_LIMIT:
		/* This function does all necessary parse...reuse it */
		ret = res_counter_memparse_write_strategy(buffer, &val);
		if (ret)
			break;
		if (type == _MEM)
			ret = mem_cgroup_resize_limit(memcg, val);
		else
			ret = mem_cgroup_resize_memsw_limit(memcg, val);
		break;
	default:
		ret = -EINVAL; /* should be BUG() ? */
		break;
	}
	return ret;
}

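/* Trigger handler: reset the max_usage or failcnt counter selected by @event. */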
static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
{
	struct mem_cgroup *mem;
	int type, name;

	mem = mem_cgroup_from_cont(cont);
	type = MEMFILE_TYPE(event);
	name = MEMFILE_ATTR(event);
	switch (name) {
	case RES_MAX_USAGE:
		if (type == _MEM)
			res_counter_reset_max(&mem->res);
		else
			res_counter_reset_max(&mem->memsw);
		break;
	case RES_FAILCNT:
		if (type == _MEM)
			res_counter_reset_failcnt(&mem->res);
		else
			res_counter_reset_failcnt(&mem->memsw);
		break;
	}
	return 0;
}

static const struct mem_cgroup_stat_desc {
	const char *msg;
	u64 unit;
} mem_cgroup_stat_desc[] = {
	[MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
	[MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
	[MEM_CGROUP_STAT_PGPGIN_COUNT] = {"pgpgin", 1, },
	[MEM_CGROUP_STAT_PGPGOUT_COUNT] = {"pgpgout", 1, },
};

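/* Handler for memory.stat: report per-memcg counters and per-LRU page counts. */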
static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
	struct mem_cgroup_stat *stat = &mem_cont->stat;
	int i;

	for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) {
		s64 val;

		val = mem_cgroup_read_stat(stat, i);
		val *= mem_cgroup_stat_desc[i].unit;
		cb->fill(cb, mem_cgroup_stat_desc[i].msg, val);
	}
	/* showing # of active pages */
	{
		unsigned long active_anon, inactive_anon;
		unsigned long active_file, inactive_file;
		unsigned long unevictable;

		inactive_anon = mem_cgroup_get_all_zonestat(mem_cont,
						LRU_INACTIVE_ANON);
		active_anon = mem_cgroup_get_all_zonestat(mem_cont,
						LRU_ACTIVE_ANON);
		inactive_file = mem_cgroup_get_all_zonestat(mem_cont,
						LRU_INACTIVE_FILE);
		active_file = mem_cgroup_get_all_zonestat(mem_cont,
						LRU_ACTIVE_FILE);
		unevictable = mem_cgroup_get_all_zonestat(mem_cont,
							LRU_UNEVICTABLE);

		cb->fill(cb, "active_anon", (active_anon) * PAGE_SIZE);
		cb->fill(cb, "inactive_anon", (inactive_anon) * PAGE_SIZE);
		cb->fill(cb, "active_file", (active_file) * PAGE_SIZE);
		cb->fill(cb, "inactive_file", (inactive_file) * PAGE_SIZE);
		cb->fill(cb, "unevictable", unevictable * PAGE_SIZE);

	}
	return 0;
}


static struct cftype mem_cgroup_files[] = {
	{
		.name = "usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
		.write_string = mem_cgroup_write,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "failcnt",
		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "stat",
		.read_map = mem_control_stat_show,
	},
	{
		.name = "force_empty",
		.trigger = mem_cgroup_force_empty_write,
	},
	{
		.name = "use_hierarchy",
		.write_u64 = mem_cgroup_hierarchy_write,
		.read_u64 = mem_cgroup_hierarchy_read,
	},
};

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
static struct cftype memsw_cgroup_files[] = {
	{
		.name = "memsw.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "memsw.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "memsw.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
		.write_string = mem_cgroup_write,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "memsw.failcnt",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
};

static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
{
	if (!do_swap_account)
		return 0;
	return cgroup_add_files(cont, ss, memsw_cgroup_files,
				ARRAY_SIZE(memsw_cgroup_files));
};
#else
static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
{
	return 0;
}
#endif

static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup_per_zone *mz;
	enum lru_list l;
	int zone, tmp = node;
	/*
	 * This routine is called against all possible nodes.
	 * But it is a bug to call kmalloc() against an offline node.
	 *
	 * TODO: this routine can waste much memory for nodes which will
	 *       never be onlined. It's better to use memory hotplug callback
	 *       function.
	 */
	if (!node_state(node, N_NORMAL_MEMORY))
		tmp = -1;
	pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
	if (!pn)
		return 1;

	mem->info.nodeinfo[node] = pn;
	memset(pn, 0, sizeof(*pn));

	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
		mz = &pn->zoneinfo[zone];
		for_each_lru(l)
			INIT_LIST_HEAD(&mz->lists[l]);
	}
	return 0;
}

static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
	kfree(mem->info.nodeinfo[node]);
}

static int mem_cgroup_size(void)
{
	int cpustat_size = nr_cpu_ids * sizeof(struct mem_cgroup_stat_cpu);
	return sizeof(struct mem_cgroup) + cpustat_size;
}

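/*
 * The mem_cgroup structure plus its per-cpu statistics can exceed a page,
 * so fall back to vmalloc() for large allocations.
 */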
static struct mem_cgroup *mem_cgroup_alloc(void)
{
	struct mem_cgroup *mem;
	int size = mem_cgroup_size();

	if (size < PAGE_SIZE)
		mem = kmalloc(size, GFP_KERNEL);
	else
		mem = vmalloc(size);

	if (mem)
		memset(mem, 0, size);
	return mem;
}

/*
 * At destroying mem_cgroup, references from swap_cgroup can remain.
 * (scanning all at force_empty is too costly...)
 *
 * Instead of clearing all references at force_empty, we remember
 * the number of references from swap_cgroup and free the mem_cgroup when
 * it goes down to 0.
 *
 * When the mem_cgroup is destroyed, mem->obsolete is set to 1 and any
 * swap_cgroup entry which points to this memcg will be ignored at swapin.
 *
 * Removal of cgroup itself succeeds regardless of refs from swap.
 */

static void mem_cgroup_free(struct mem_cgroup *mem)
{
	int node;

	if (atomic_read(&mem->refcnt) > 0)
		return;


	for_each_node_state(node, N_POSSIBLE)
		free_mem_cgroup_per_zone_info(mem, node);

	if (mem_cgroup_size() < PAGE_SIZE)
		kfree(mem);
	else
		vfree(mem);
}

static void mem_cgroup_get(struct mem_cgroup *mem)
{
	atomic_inc(&mem->refcnt);
}

static void mem_cgroup_put(struct mem_cgroup *mem)
{
	if (atomic_dec_and_test(&mem->refcnt)) {
		if (!mem->obsolete)
			return;
		mem_cgroup_free(mem);
	}
}


#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
static void __init enable_swap_cgroup(void)
{
	if (!mem_cgroup_disabled() && really_do_swap_account)
		do_swap_account = 1;
}
#else
static void __init enable_swap_cgroup(void)
{
}
#endif

static struct cgroup_subsys_state *
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct mem_cgroup *mem, *parent;
	int node;

	mem = mem_cgroup_alloc();
	if (!mem)
		return ERR_PTR(-ENOMEM);

	for_each_node_state(node, N_POSSIBLE)
		if (alloc_mem_cgroup_per_zone_info(mem, node))
			goto free_out;
	/* root ? */
	if (cont->parent == NULL) {
		enable_swap_cgroup();
		parent = NULL;
	} else {
		parent = mem_cgroup_from_cont(cont->parent);
		mem->use_hierarchy = parent->use_hierarchy;
	}

	if (parent && parent->use_hierarchy) {
		res_counter_init(&mem->res, &parent->res);
		res_counter_init(&mem->memsw, &parent->memsw);
	} else {
		res_counter_init(&mem->res, NULL);
		res_counter_init(&mem->memsw, NULL);
	}

	mem->last_scanned_child = NULL;

	return &mem->css;
free_out:
	for_each_node_state(node, N_POSSIBLE)
		free_mem_cgroup_per_zone_info(mem, node);
	mem_cgroup_free(mem);
	return ERR_PTR(-ENOMEM);
}

static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
					struct cgroup *cont)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
	mem->obsolete = 1;
	mem_cgroup_force_empty(mem, false);
}

static void mem_cgroup_destroy(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	mem_cgroup_free(mem_cgroup_from_cont(cont));
}

static int mem_cgroup_populate(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	int ret;

	ret = cgroup_add_files(cont, ss, mem_cgroup_files,
				ARRAY_SIZE(mem_cgroup_files));

	if (!ret)
		ret = register_memsw_files(cont, ss);
	return ret;
}

static void mem_cgroup_move_task(struct cgroup_subsys *ss,
				struct cgroup *cont,
				struct cgroup *old_cont,
				struct task_struct *p)
{
	struct mm_struct *mm;
	struct mem_cgroup *mem, *old_mem;

	mm = get_task_mm(p);
	if (mm == NULL)
		return;

	mem = mem_cgroup_from_cont(cont);
	old_mem = mem_cgroup_from_cont(old_cont);

	/*
	 * Only thread group leaders are allowed to migrate, the mm_struct is
	 * in effect owned by the leader
	 */
	if (!thread_group_leader(p))
		goto out;

out:
	mmput(mm);
}

struct cgroup_subsys mem_cgroup_subsys = {
	.name = "memory",
	.subsys_id = mem_cgroup_subsys_id,
	.create = mem_cgroup_create,
	.pre_destroy = mem_cgroup_pre_destroy,
	.destroy = mem_cgroup_destroy,
	.populate = mem_cgroup_populate,
	.attach = mem_cgroup_move_task,
	.early_init = 0,
};

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP

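/* Kernel boot parameter "noswapaccount": disable mem+swap accounting. */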
static int __init disable_swap_account(char *s)
{
	really_do_swap_account = 0;
	return 1;
}
__setup("noswapaccount", disable_swap_account);
#endif