/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/mm_inline.h>
#include <linux/page_cgroup.h>
#include "internal.h"

#include <asm/uaccess.h>

struct cgroup_subsys mem_cgroup_subsys __read_mostly;
#define MEM_CGROUP_RECLAIM_RETRIES	5

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
int do_swap_account __read_mostly;
static int really_do_swap_account __initdata = 1; /* to remember the boot option */
#else
#define do_swap_account		(0)
#endif

/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE, 	   /* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,	   /* # of pages charged as rss */
	MEM_CGROUP_STAT_PGPGIN_COUNT,	/* # of pages paged in */
	MEM_CGROUP_STAT_PGPGOUT_COUNT,	/* # of pages paged out */

	MEM_CGROUP_STAT_NSTATS,
};

struct mem_cgroup_stat_cpu {
	s64 count[MEM_CGROUP_STAT_NSTATS];
} ____cacheline_aligned_in_smp;

struct mem_cgroup_stat {
	struct mem_cgroup_stat_cpu cpustat[0];
};
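
/*
 * Statistics live in per-CPU slots and each slot is cache-line aligned,
 * so concurrent counter updates from different CPUs do not bounce the
 * same cache line between them.
 */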

/*
 * For accounting under irq disable, no need to increment the preempt count.
 */
static inline void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat_cpu *stat,
		enum mem_cgroup_stat_index idx, int val)
{
	stat->count[idx] += val;
}

static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
		enum mem_cgroup_stat_index idx)
{
	int cpu;
	s64 ret = 0;
	for_each_possible_cpu(cpu)
		ret += stat->cpustat[cpu].count[idx];
	return ret;
}

/*
 * per-zone information in memory controller.
 */
struct mem_cgroup_per_zone {
	/*
	 * spin_lock to protect the per cgroup LRU
	 */
	struct list_head	lists[NR_LRU_LISTS];
	unsigned long		count[NR_LRU_LISTS];
};
/* Macro for accessing counter */
#define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

struct mem_cgroup_lru_info {
	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
};

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;
	/*
	 * the counter to account for mem+swap usage.
	 */
	struct res_counter memsw;
	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
	 */
	struct mem_cgroup_lru_info info;

	int	prev_priority;	/* for recording reclaim priority */

	/*
	 * While reclaiming in a hierarchy, we cache the last child we
	 * reclaimed from. Protected by cgroup_lock()
	 */
	struct mem_cgroup *last_scanned_child;
	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;
	unsigned long	last_oom_jiffies;
	int		obsolete;
	atomic_t	refcnt;

	unsigned int inactive_ratio;

	/*
	 * statistics. This must be placed at the end of memcg.
	 */
	struct mem_cgroup_stat stat;
};

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_MAPPED,
	MEM_CGROUP_CHARGE_TYPE_SHMEM,	/* used by page migration of shmem */
	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	NR_CHARGE_TYPE,
};

/* only for here (for easy reading.) */
#define PCGF_CACHE	(1UL << PCG_CACHE)
#define PCGF_USED	(1UL << PCG_USED)
#define PCGF_LOCK	(1UL << PCG_LOCK)
static const unsigned long
pcg_default_flags[NR_CHARGE_TYPE] = {
	PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* File Cache */
	PCGF_USED | PCGF_LOCK, /* Anon */
	PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* Shmem */
	0, /* FORCE */
};

/* for encoding cft->private value on file */
#define _MEM			(0)
#define _MEMSWAP		(1)
#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_TYPE(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
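/*
 * Example: MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT) packs the counter type
 * (_MEMSWAP) into the upper 16 bits and the res_counter member (RES_LIMIT)
 * into the lower 16 bits; MEMFILE_TYPE()/MEMFILE_ATTR() unpack them again.
 */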

static void mem_cgroup_get(struct mem_cgroup *mem);
static void mem_cgroup_put(struct mem_cgroup *mem);

static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
					 struct page_cgroup *pc,
					 bool charge)
{
	int val = (charge)? 1 : -1;
	struct mem_cgroup_stat *stat = &mem->stat;
	struct mem_cgroup_stat_cpu *cpustat;
	int cpu = get_cpu();

	cpustat = &stat->cpustat[cpu];
	if (PageCgroupCache(pc))
		__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val);
	else
		__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_RSS, val);

	if (charge)
		__mem_cgroup_stat_add_safe(cpustat,
				MEM_CGROUP_STAT_PGPGIN_COUNT, 1);
	else
		__mem_cgroup_stat_add_safe(cpustat,
				MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
	put_cpu();
}

static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
{
	return &mem->info.nodeinfo[nid]->zoneinfo[zid];
}

static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct page_cgroup *pc)
{
	struct mem_cgroup *mem = pc->mem_cgroup;
	int nid = page_cgroup_nid(pc);
	int zid = page_cgroup_zid(pc);

	if (!mem)
		return NULL;

	return mem_cgroup_zoneinfo(mem, nid, zid);
}

static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
					enum lru_list idx)
{
	int nid, zid;
	struct mem_cgroup_per_zone *mz;
	u64 total = 0;

	for_each_online_node(nid)
		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
			mz = mem_cgroup_zoneinfo(mem, nid, zid);
			total += MEM_CGROUP_ZSTAT(mz, idx);
		}
	return total;
}

static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
	return container_of(cgroup_subsys_state(cont,
				mem_cgroup_subsys_id), struct mem_cgroup,
				css);
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
				struct mem_cgroup, css);
}

/*
 * Following LRU functions are allowed to be used without PCG_LOCK.
 * Operations are called by the global LRU routines, independently of memcg.
 * What we have to take care of here is the validity of pc->mem_cgroup.
 *
 * Changes to pc->mem_cgroup happen when
 * 1. charge
 * 2. moving account
 * In the typical case, "charge" is done before add-to-lru. The exception is
 * SwapCache, which is added to the LRU before being charged.
 * If the PCG_USED bit is not set, the page_cgroup is not added to this
 * private LRU.
 * When moving account, the page is not on the LRU. It's isolated.
 */

void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_disabled())
		return;
	pc = lookup_page_cgroup(page);
	/* can happen while we handle swapcache. */
	if (list_empty(&pc->lru))
		return;
	mz = page_cgroup_zoneinfo(pc);
	mem = pc->mem_cgroup;
	MEM_CGROUP_ZSTAT(mz, lru) -= 1;
	list_del_init(&pc->lru);
	return;
}

void mem_cgroup_del_lru(struct page *page)
{
	mem_cgroup_del_lru_list(page, page_lru(page));
}

void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
{
	struct mem_cgroup_per_zone *mz;
	struct page_cgroup *pc;

	if (mem_cgroup_disabled())
		return;

	pc = lookup_page_cgroup(page);
	/* barrier to sync with "charge" */
	smp_rmb();
	/* unused page is not rotated. */
	if (!PageCgroupUsed(pc))
		return;
	mz = page_cgroup_zoneinfo(pc);
	list_move(&pc->lru, &mz->lists[lru]);
}

void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_disabled())
		return;
	pc = lookup_page_cgroup(page);
	/* barrier to sync with "charge" */
	smp_rmb();
	if (!PageCgroupUsed(pc))
		return;

	mz = page_cgroup_zoneinfo(pc);
	MEM_CGROUP_ZSTAT(mz, lru) += 1;
	list_add(&pc->lru, &mz->lists[lru]);
}
/*
 * To add swapcache into LRU. Be careful when using this function.
 * zone->lru_lock shouldn't be held and irq must not be disabled.
 */
static void mem_cgroup_lru_fixup(struct page *page)
{
	if (!isolate_lru_page(page))
		putback_lru_page(page);
}

void mem_cgroup_move_lists(struct page *page,
			   enum lru_list from, enum lru_list to)
{
	if (mem_cgroup_disabled())
		return;
	mem_cgroup_del_lru_list(page, from);
	mem_cgroup_add_lru_list(page, to);
}

int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
{
	int ret;

	task_lock(task);
	ret = task->mm && mm_match_cgroup(task->mm, mem);
	task_unlock(task);
	return ret;
}

/*
 * Calculate mapped_ratio under memory controller. This will be used in
 * vmscan.c for determining whether we have to reclaim mapped pages.
 */
int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
{
	long total, rss;

	/*
	 * usage is recorded in bytes. But, here, we assume the number of
	 * physical pages can be represented by "long" on any arch.
	 */
	total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
	rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
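	/* e.g. usage of 400 pages with rss = 100 yields (100 * 100) / 401 = 24 */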
	return (int)((rss * 100L) / total);
}
/*
 * prev_priority control...this will be used in memory reclaim path.
 */
int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	return mem->prev_priority;
}

void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
{
	if (priority < mem->prev_priority)
		mem->prev_priority = priority;
}

void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
{
	mem->prev_priority = priority;
}

/*
 * Calculate # of pages to be scanned in this priority/zone.
 * See also vmscan.c
 *
 * priority starts from "DEF_PRIORITY" and is decremented in each loop.
 * (see include/linux/mmzone.h)
 */
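/*
 * Worked example: at DEF_PRIORITY (12), a per-zone LRU list holding
 * 40960 pages yields a scan target of 40960 >> 12 = 10 pages; each
 * lower priority level doubles that target.
 */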

long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
					int priority, enum lru_list lru)
{
	long nr_pages;
	int nid = zone->zone_pgdat->node_id;
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);

	nr_pages = MEM_CGROUP_ZSTAT(mz, lru);

	return (nr_pages >> priority);
}

int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
{
	unsigned long active;
	unsigned long inactive;

	inactive = mem_cgroup_get_all_zonestat(memcg, LRU_INACTIVE_ANON);
	active = mem_cgroup_get_all_zonestat(memcg, LRU_ACTIVE_ANON);

	if (inactive * memcg->inactive_ratio < active)
		return 1;

	return 0;
}

unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
				       struct zone *zone,
				       enum lru_list lru)
{
	int nid = zone->zone_pgdat->node_id;
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);

	return MEM_CGROUP_ZSTAT(mz, lru);
}

unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active, int file)
{
	unsigned long nr_taken = 0;
	struct page *page;
	unsigned long scan;
	LIST_HEAD(pc_list);
	struct list_head *src;
	struct page_cgroup *pc, *tmp;
	int nid = z->zone_pgdat->node_id;
	int zid = zone_idx(z);
	struct mem_cgroup_per_zone *mz;
	int lru = LRU_FILE * !!file + !!active;

	BUG_ON(!mem_cont);
	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
	src = &mz->lists[lru];

	scan = 0;
	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
		if (scan >= nr_to_scan)
			break;

		page = pc->page;
		if (unlikely(!PageCgroupUsed(pc)))
			continue;
		if (unlikely(!PageLRU(page)))
			continue;

		scan++;
		if (__isolate_lru_page(page, mode, file) == 0) {
			list_move(&page->lru, dst);
			nr_taken++;
		}
	}

	*scanned = scan;
	return nr_taken;
}

#define mem_cgroup_from_res_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

/*
 * This routine finds the DFS walk successor. It should be
 * called with cgroup_mutex held.
 */
static struct mem_cgroup *
mem_cgroup_get_next_node(struct mem_cgroup *curr, struct mem_cgroup *root_mem)
{
	struct cgroup *cgroup, *curr_cgroup, *root_cgroup;

	curr_cgroup = curr->css.cgroup;
	root_cgroup = root_mem->css.cgroup;

	if (!list_empty(&curr_cgroup->children)) {
		/*
		 * Walk down to children
		 */
		mem_cgroup_put(curr);
		cgroup = list_entry(curr_cgroup->children.next,
						struct cgroup, sibling);
		curr = mem_cgroup_from_cont(cgroup);
		mem_cgroup_get(curr);
		goto done;
	}

visit_parent:
	if (curr_cgroup == root_cgroup) {
		mem_cgroup_put(curr);
		curr = root_mem;
		mem_cgroup_get(curr);
		goto done;
	}

	/*
	 * Goto next sibling
	 */
	if (curr_cgroup->sibling.next != &curr_cgroup->parent->children) {
		mem_cgroup_put(curr);
		cgroup = list_entry(curr_cgroup->sibling.next, struct cgroup,
						sibling);
		curr = mem_cgroup_from_cont(cgroup);
		mem_cgroup_get(curr);
		goto done;
	}

	/*
	 * Go up to next parent and next parent's sibling if need be
	 */
	curr_cgroup = curr_cgroup->parent;
	goto visit_parent;

done:
	root_mem->last_scanned_child = curr;
	return curr;
}

/*
 * Visit the first child (need not be the first child as per the ordering
 * of the cgroup list, since we track last_scanned_child) of @mem and use
 * that to reclaim free pages from.
 */
static struct mem_cgroup *
mem_cgroup_get_first_node(struct mem_cgroup *root_mem)
{
	struct cgroup *cgroup;
	struct mem_cgroup *ret;
	bool obsolete = (root_mem->last_scanned_child &&
				root_mem->last_scanned_child->obsolete);

	/*
	 * Scan all children under the mem_cgroup mem
	 */
	cgroup_lock();
	if (list_empty(&root_mem->css.cgroup->children)) {
		ret = root_mem;
		goto done;
	}

	if (!root_mem->last_scanned_child || obsolete) {

		if (obsolete)
			mem_cgroup_put(root_mem->last_scanned_child);

		cgroup = list_first_entry(&root_mem->css.cgroup->children,
				struct cgroup, sibling);
		ret = mem_cgroup_from_cont(cgroup);
		mem_cgroup_get(ret);
	} else
		ret = mem_cgroup_get_next_node(root_mem->last_scanned_child,
						root_mem);

done:
	root_mem->last_scanned_child = ret;
	cgroup_unlock();
	return ret;
}

static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
{
	if (do_swap_account) {
		if (res_counter_check_under_limit(&mem->res) &&
			res_counter_check_under_limit(&mem->memsw))
			return true;
	} else
		if (res_counter_check_under_limit(&mem->res))
			return true;
	return false;
}

/*
 * Dance down the hierarchy if needed to reclaim memory. We remember the
 * last child we reclaimed from, so that we don't end up penalizing
 * one child extensively based on its position in the children list.
 *
 * root_mem is the original ancestor that we've been reclaiming from.
 */
static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
						gfp_t gfp_mask, bool noswap)
{
	struct mem_cgroup *next_mem;
	int ret = 0;

	/*
	 * Reclaim unconditionally and don't check for return value.
	 * We need to reclaim in the current group and down the tree.
	 * One might think about checking for children before reclaiming,
	 * but there might be left over accounting, even after children
	 * have left.
	 */
	ret = try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap);
	if (mem_cgroup_check_under_limit(root_mem))
		return 0;
	if (!root_mem->use_hierarchy)
		return ret;

	next_mem = mem_cgroup_get_first_node(root_mem);

	while (next_mem != root_mem) {
		if (next_mem->obsolete) {
			mem_cgroup_put(next_mem);
			cgroup_lock();
			next_mem = mem_cgroup_get_first_node(root_mem);
			cgroup_unlock();
			continue;
		}
		ret = try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap);
		if (mem_cgroup_check_under_limit(root_mem))
			return 0;
		cgroup_lock();
		next_mem = mem_cgroup_get_next_node(next_mem, root_mem);
		cgroup_unlock();
	}
	return ret;
}

bool mem_cgroup_oom_called(struct task_struct *task)
{
	bool ret = false;
	struct mem_cgroup *mem;
	struct mm_struct *mm;

	rcu_read_lock();
	mm = task->mm;
	if (!mm)
		mm = &init_mm;
	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (mem && time_before(jiffies, mem->last_oom_jiffies + HZ/10))
		ret = true;
	rcu_read_unlock();
	return ret;
}
/*
 * Unlike the exported interface, an "oom" parameter is added. If oom == true,
 * the OOM killer can be invoked.
 */
static int __mem_cgroup_try_charge(struct mm_struct *mm,
			gfp_t gfp_mask, struct mem_cgroup **memcg,
			bool oom)
{
	struct mem_cgroup *mem, *mem_over_limit;
	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct res_counter *fail_res;

	if (unlikely(test_thread_flag(TIF_MEMDIE))) {
		/* Don't account this! */
		*memcg = NULL;
		return 0;
	}

	/*
	 * We always charge the cgroup the mm_struct belongs to.
	 * The mm_struct's mem_cgroup changes on task migration if the
	 * thread group leader migrates. It's possible that mm is not
	 * set, if so charge the init_mm (happens for pagecache usage).
	 */
	if (likely(!*memcg)) {
		rcu_read_lock();
		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
		if (unlikely(!mem)) {
			rcu_read_unlock();
			return 0;
		}
		/*
		 * For every charge from the cgroup, increment reference count
		 */
		css_get(&mem->css);
		*memcg = mem;
		rcu_read_unlock();
	} else {
		mem = *memcg;
		css_get(&mem->css);
	}

	while (1) {
		int ret;
		bool noswap = false;

		ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
		if (likely(!ret)) {
			if (!do_swap_account)
				break;
			ret = res_counter_charge(&mem->memsw, PAGE_SIZE,
							&fail_res);
			if (likely(!ret))
				break;
			/* mem+swap counter fails */
			res_counter_uncharge(&mem->res, PAGE_SIZE);
			noswap = true;
			mem_over_limit = mem_cgroup_from_res_counter(fail_res,
									memsw);
		} else
			/* mem counter fails */
			mem_over_limit = mem_cgroup_from_res_counter(fail_res,
									res);

		if (!(gfp_mask & __GFP_WAIT))
			goto nomem;

		ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
							noswap);

		/*
		 * try_to_free_mem_cgroup_pages() might not give us a full
		 * picture of reclaim. Some pages are reclaimed and might be
		 * moved to swap cache or just unmapped from the cgroup.
		 * Check the limit again to see if the reclaim reduced the
		 * current usage of the cgroup before giving up.
		 */
		if (mem_cgroup_check_under_limit(mem_over_limit))
			continue;

		if (!nr_retries--) {
			if (oom) {
				mem_cgroup_out_of_memory(mem_over_limit, gfp_mask);
				mem_over_limit->last_oom_jiffies = jiffies;
			}
			goto nomem;
		}
	}
	return 0;
nomem:
	css_put(&mem->css);
	return -ENOMEM;
}

/**
 * mem_cgroup_try_charge - get charge of PAGE_SIZE.
 * @mm: an mm_struct which is charged against. (when *memcg is NULL)
 * @gfp_mask: gfp_mask for reclaim.
 * @memcg: a pointer to memory cgroup which is charged against.
 *
 * Charge against the memory cgroup pointed to by *memcg. If *memcg == NULL,
 * the memory cgroup is looked up from @mm and stored in *memcg.
 *
 * Returns 0 on success and -ENOMEM on failure.
 * This call can invoke the OOM killer.
 */

int mem_cgroup_try_charge(struct mm_struct *mm,
			  gfp_t mask, struct mem_cgroup **memcg)
{
	return __mem_cgroup_try_charge(mm, mask, memcg, true);
}

/*
 * commit a charge got by mem_cgroup_try_charge() and set the page_cgroup to
 * USED state. If it is already USED, uncharge and return.
 */

static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
				     struct page_cgroup *pc,
				     enum charge_type ctype)
{
	/* try_charge() can return NULL to *memcg, taking care of it. */
	if (!mem)
		return;

	lock_page_cgroup(pc);
	if (unlikely(PageCgroupUsed(pc))) {
		unlock_page_cgroup(pc);
		res_counter_uncharge(&mem->res, PAGE_SIZE);
		if (do_swap_account)
			res_counter_uncharge(&mem->memsw, PAGE_SIZE);
		css_put(&mem->css);
		return;
	}
	pc->mem_cgroup = mem;
	/*
	 * Make pc->mem_cgroup visible before setting the USED flag; this
	 * pairs with the smp_rmb() in the mem_cgroup LRU functions.
	 */
	smp_wmb();
	pc->flags = pcg_default_flags[ctype];

	mem_cgroup_charge_statistics(mem, pc, true);

	unlock_page_cgroup(pc);
}

/**
 * mem_cgroup_move_account - move account of the page
 * @pc:	page_cgroup of the page.
 * @from: mem_cgroup which the page is moved from.
 * @to:	mem_cgroup which the page is moved to. @from != @to.
 *
 * The caller must confirm following.
 * - page is not on LRU (isolate_page() is useful.)
 *
 * returns 0 on success,
 * returns -EBUSY when the lock is busy or "pc" is unstable.
 *
 * This function does "uncharge" from old cgroup but doesn't do "charge" to
 * new cgroup. It should be done by a caller.
 */

static int mem_cgroup_move_account(struct page_cgroup *pc,
	struct mem_cgroup *from, struct mem_cgroup *to)
{
	struct mem_cgroup_per_zone *from_mz, *to_mz;
	int nid, zid;
	int ret = -EBUSY;

	VM_BUG_ON(from == to);
	VM_BUG_ON(PageLRU(pc->page));

	nid = page_cgroup_nid(pc);
	zid = page_cgroup_zid(pc);
	from_mz =  mem_cgroup_zoneinfo(from, nid, zid);
	to_mz =  mem_cgroup_zoneinfo(to, nid, zid);

	if (!trylock_page_cgroup(pc))
		return ret;

	if (!PageCgroupUsed(pc))
		goto out;

	if (pc->mem_cgroup != from)
		goto out;

	css_put(&from->css);
	res_counter_uncharge(&from->res, PAGE_SIZE);
	mem_cgroup_charge_statistics(from, pc, false);
	if (do_swap_account)
		res_counter_uncharge(&from->memsw, PAGE_SIZE);
	pc->mem_cgroup = to;
	mem_cgroup_charge_statistics(to, pc, true);
	css_get(&to->css);
	ret = 0;
out:
	unlock_page_cgroup(pc);
	return ret;
}

/*
 * move charges to its parent.
 */

static int mem_cgroup_move_parent(struct page_cgroup *pc,
				  struct mem_cgroup *child,
				  gfp_t gfp_mask)
{
	struct page *page = pc->page;
	struct cgroup *cg = child->css.cgroup;
	struct cgroup *pcg = cg->parent;
	struct mem_cgroup *parent;
	int ret;

	/* Is ROOT ? */
	if (!pcg)
		return -EINVAL;

	parent = mem_cgroup_from_cont(pcg);

	ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
	if (ret || !parent)
		return ret;

	if (!get_page_unless_zero(page))
		return -EBUSY;

	ret = isolate_lru_page(page);

	if (ret)
		goto cancel;

	ret = mem_cgroup_move_account(pc, child, parent);

	/* drop the extra refcnt from try_charge() (move_account takes one) */
	css_put(&parent->css);
	putback_lru_page(page);
	if (!ret) {
		put_page(page);
		return 0;
	}
	/* uncharge if move fails */
cancel:
	res_counter_uncharge(&parent->res, PAGE_SIZE);
	if (do_swap_account)
		res_counter_uncharge(&parent->memsw, PAGE_SIZE);
	put_page(page);
	return ret;
}

/*
 * Charge the memory controller for page usage.
 * Return
 * 0 if the charge was successful
 * < 0 if the cgroup is over its limit
 */
static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask, enum charge_type ctype,
				struct mem_cgroup *memcg)
{
	struct mem_cgroup *mem;
	struct page_cgroup *pc;
	int ret;

	pc = lookup_page_cgroup(page);
	/* can happen at boot */
	if (unlikely(!pc))
		return 0;
	prefetchw(pc);

	mem = memcg;
	ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
	if (ret || !mem)
		return ret;

	__mem_cgroup_commit_charge(mem, pc, ctype);
	return 0;
}

int mem_cgroup_newpage_charge(struct page *page,
			      struct mm_struct *mm, gfp_t gfp_mask)
{
	if (mem_cgroup_disabled())
		return 0;
	if (PageCompound(page))
		return 0;
	/*
	 * If already mapped, we don't have to account.
	 * If page cache, page->mapping has address_space.
	 * But page->mapping may have an out-of-use anon_vma pointer;
	 * detect it by the PageAnon() check. A newly-mapped-anon's
	 * page->mapping is NULL.
	 */
	if (page_mapped(page) || (page->mapping && !PageAnon(page)))
		return 0;
	if (unlikely(!mm))
		mm = &init_mm;
	return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
}

int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask)
{
	if (mem_cgroup_disabled())
		return 0;
	if (PageCompound(page))
		return 0;
	/*
	 * Corner case handling. This is usually called from
	 * add_to_page_cache(). But some FS (shmem) precharge this page
	 * before calling it and call add_to_page_cache() with GFP_NOWAIT.
	 *
	 * For the GFP_NOWAIT case, the page may be pre-charged before calling
	 * add_to_page_cache(). (See shmem.c) Check it here and avoid charging
	 * twice. (It works but has to pay a bit larger cost.)
	 */
	if (!(gfp_mask & __GFP_WAIT)) {
		struct page_cgroup *pc;

		pc = lookup_page_cgroup(page);
		if (!pc)
			return 0;
		lock_page_cgroup(pc);
		if (PageCgroupUsed(pc)) {
			unlock_page_cgroup(pc);
			return 0;
		}
		unlock_page_cgroup(pc);
	}

	if (unlikely(!mm))
		mm = &init_mm;

	if (page_is_file_cache(page))
		return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
	else
		return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL);
}

int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
				 struct page *page,
				 gfp_t mask, struct mem_cgroup **ptr)
{
	struct mem_cgroup *mem;
	swp_entry_t     ent;

	if (mem_cgroup_disabled())
		return 0;

	if (!do_swap_account)
		goto charge_cur_mm;

	/*
	 * A racing thread's fault, or swapoff, may have already updated
	 * the pte, and even removed page from swap cache: return success
	 * to go on to do_swap_page()'s pte_same() test, which should fail.
	 */
	if (!PageSwapCache(page))
		return 0;

	ent.val = page_private(page);

	mem = lookup_swap_cgroup(ent);
	if (!mem || mem->obsolete)
		goto charge_cur_mm;
	*ptr = mem;
	return __mem_cgroup_try_charge(NULL, mask, ptr, true);
charge_cur_mm:
	if (unlikely(!mm))
		mm = &init_mm;
	return __mem_cgroup_try_charge(mm, mask, ptr, true);
}

#ifdef CONFIG_SWAP

int mem_cgroup_cache_charge_swapin(struct page *page,
			struct mm_struct *mm, gfp_t mask, bool locked)
{
	int ret = 0;

	if (mem_cgroup_disabled())
		return 0;
	if (unlikely(!mm))
		mm = &init_mm;
	if (!locked)
		lock_page(page);
	/*
	 * If not locked, the page can be dropped from SwapCache until
	 * we reach here.
	 */
	if (PageSwapCache(page)) {
		struct mem_cgroup *mem = NULL;
		swp_entry_t ent;

		ent.val = page_private(page);
		if (do_swap_account) {
			mem = lookup_swap_cgroup(ent);
			if (mem && mem->obsolete)
				mem = NULL;
			if (mem)
				mm = NULL;
		}
		ret = mem_cgroup_charge_common(page, mm, mask,
				MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);

		if (!ret && do_swap_account) {
			/* avoid double counting */
			mem = swap_cgroup_record(ent, NULL);
			if (mem) {
				res_counter_uncharge(&mem->memsw, PAGE_SIZE);
				mem_cgroup_put(mem);
			}
		}
	}
	if (!locked)
		unlock_page(page);
	/* add this page(page_cgroup) to the LRU we want. */
	mem_cgroup_lru_fixup(page);

	return ret;
}
#endif

void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
{
	struct page_cgroup *pc;

	if (mem_cgroup_disabled())
		return;
	if (!ptr)
		return;
	pc = lookup_page_cgroup(page);
	__mem_cgroup_commit_charge(ptr, pc, MEM_CGROUP_CHARGE_TYPE_MAPPED);
	/*
	 * Now swap is on-memory. This means this page may be
	 * counted both as mem and swap....double count.
	 * Fix it by uncharging from memsw. This SwapCache is stable
	 * because we're still under lock_page().
	 */
	if (do_swap_account) {
		swp_entry_t ent = {.val = page_private(page)};
		struct mem_cgroup *memcg;
		memcg = swap_cgroup_record(ent, NULL);
		if (memcg) {
			/* If memcg is obsolete, memcg can be != ptr */
			res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
			mem_cgroup_put(memcg);
		}
	}
	/* add this page(page_cgroup) to the LRU we want. */
	mem_cgroup_lru_fixup(page);
}

void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
{
	if (mem_cgroup_disabled())
		return;
	if (!mem)
		return;
	res_counter_uncharge(&mem->res, PAGE_SIZE);
	if (do_swap_account)
		res_counter_uncharge(&mem->memsw, PAGE_SIZE);
	css_put(&mem->css);
}
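
/*
 * Note on the charging protocol above: callers first reserve a charge with
 * __mem_cgroup_try_charge(), then either bind it to a page via
 * __mem_cgroup_commit_charge() or release it again with
 * mem_cgroup_cancel_charge_swapin() if the page went away in between.
 */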


/*
 * uncharge if !page_mapped(page)
 */
static struct mem_cgroup *
__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem = NULL;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_disabled())
		return NULL;

	if (PageSwapCache(page))
		return NULL;

	/*
	 * Check if our page_cgroup is valid
	 */
	pc = lookup_page_cgroup(page);
	if (unlikely(!pc || !PageCgroupUsed(pc)))
		return NULL;

	lock_page_cgroup(pc);

	mem = pc->mem_cgroup;

	if (!PageCgroupUsed(pc))
		goto unlock_out;

	switch (ctype) {
	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
		if (page_mapped(page))
			goto unlock_out;
		break;
	case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
		if (!PageAnon(page)) {	/* Shared memory */
			if (page->mapping && !page_is_file_cache(page))
				goto unlock_out;
		} else if (page_mapped(page)) /* Anon */
				goto unlock_out;
		break;
	default:
		break;
	}

	res_counter_uncharge(&mem->res, PAGE_SIZE);
	if (do_swap_account && (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
		res_counter_uncharge(&mem->memsw, PAGE_SIZE);

	mem_cgroup_charge_statistics(mem, pc, false);
	ClearPageCgroupUsed(pc);

	mz = page_cgroup_zoneinfo(pc);
	unlock_page_cgroup(pc);

	/* at swapout, this memcg will be accessed to record to swap */
	if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
		css_put(&mem->css);

	return mem;

unlock_out:
	unlock_page_cgroup(pc);
	return NULL;
}

void mem_cgroup_uncharge_page(struct page *page)
{
	/* early check. */
	if (page_mapped(page))
		return;
	if (page->mapping && !PageAnon(page))
		return;
	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
}

void mem_cgroup_uncharge_cache_page(struct page *page)
{
	VM_BUG_ON(page_mapped(page));
	VM_BUG_ON(page->mapping);
	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
}

/*
 * called from __delete_from_swap_cache() and drop "page" account.
 * memcg information is recorded to swap_cgroup of "ent"
 */
void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
{
	struct mem_cgroup *memcg;

	memcg = __mem_cgroup_uncharge_common(page,
					MEM_CGROUP_CHARGE_TYPE_SWAPOUT);
	/* record memcg information */
	if (do_swap_account && memcg) {
		swap_cgroup_record(ent, memcg);
		mem_cgroup_get(memcg);
	}
	if (memcg)
		css_put(&memcg->css);
}

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
/*
 * called from swap_entry_free(). remove record in swap_cgroup and
 * uncharge "memsw" account.
 */
void mem_cgroup_uncharge_swap(swp_entry_t ent)
{
	struct mem_cgroup *memcg;

	if (!do_swap_account)
		return;

	memcg = swap_cgroup_record(ent, NULL);
	if (memcg) {
		res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
		mem_cgroup_put(memcg);
	}
}
#endif

/*
 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
 * page belongs to.
 */
int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem = NULL;
	int ret = 0;

	if (mem_cgroup_disabled())
		return 0;

	pc = lookup_page_cgroup(page);
	lock_page_cgroup(pc);
	if (PageCgroupUsed(pc)) {
		mem = pc->mem_cgroup;
		css_get(&mem->css);
	}
	unlock_page_cgroup(pc);

	if (mem) {
		ret = mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem);
		css_put(&mem->css);
	}
	*ptr = mem;
	return ret;
}

/* remove redundant charge if migration failed */
void mem_cgroup_end_migration(struct mem_cgroup *mem,
		struct page *oldpage, struct page *newpage)
{
	struct page *target, *unused;
	struct page_cgroup *pc;
	enum charge_type ctype;

	if (!mem)
		return;

	/* at migration success, oldpage->mapping is NULL. */
	if (oldpage->mapping) {
		target = oldpage;
		unused = NULL;
	} else {
		target = newpage;
		unused = oldpage;
	}

	if (PageAnon(target))
		ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
	else if (page_is_file_cache(target))
		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
	else
		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;

	/* unused page is not on radix-tree now. */
	if (unused)
		__mem_cgroup_uncharge_common(unused, ctype);

	pc = lookup_page_cgroup(target);
	/*
	 * __mem_cgroup_commit_charge() checks the PCG_USED bit of page_cgroup.
	 * So, double-counting is effectively avoided.
	 */
	__mem_cgroup_commit_charge(mem, pc, ctype);

	/*
	 * Both of oldpage and newpage are still under lock_page().
	 * Then, we don't have to care about race in radix-tree.
	 * But we have to be careful that this page is unmapped or not.
	 *
	 * There is a case for !page_mapped(). At the start of
	 * migration, oldpage was mapped. But now, it's zapped.
	 * But we know *target* page is not freed/reused under us.
	 * mem_cgroup_uncharge_page() does all necessary checks.
	 */
	if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
		mem_cgroup_uncharge_page(target);
}

/*
 * A call to try to shrink memory usage under specified resource controller.
 * This is typically used for page reclaiming for shmem for reducing side
 * effect of page allocation from shmem, which is used by some mem_cgroup.
 */
int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
{
	struct mem_cgroup *mem;
	int progress = 0;
	int retry = MEM_CGROUP_RECLAIM_RETRIES;

	if (mem_cgroup_disabled())
		return 0;
	if (!mm)
		return 0;

	rcu_read_lock();
	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (unlikely(!mem)) {
		rcu_read_unlock();
		return 0;
	}
	css_get(&mem->css);
	rcu_read_unlock();

	do {
		progress = try_to_free_mem_cgroup_pages(mem, gfp_mask, true);
		progress += mem_cgroup_check_under_limit(mem);
	} while (!progress && --retry);

	css_put(&mem->css);
	if (!retry)
		return -ENOMEM;
	return 0;
}

/*
 * The inactive anon list should be small enough that the VM never has to
 * do too much work, but large enough that each inactive page has a chance
 * to be referenced again before it is swapped out.
 *
 * this calculation is a straightforward port of
 * page_alloc.c::setup_per_zone_inactive_ratio(), which
 * describes it in more detail.
 */
static void mem_cgroup_set_inactive_ratio(struct mem_cgroup *memcg)
{
	unsigned int gb, ratio;

	gb = res_counter_read_u64(&memcg->res, RES_LIMIT) >> 30;
	if (gb)
		ratio = int_sqrt(10 * gb);
	else
		ratio = 1;
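
	/*
	 * Worked example: a 4GB limit gives gb = 4 and
	 * ratio = int_sqrt(40) = 6, so the inactive anon list is considered
	 * low once it falls below roughly 1/7 of the anon pages.
	 */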

	memcg->inactive_ratio = ratio;

}

static DEFINE_MUTEX(set_limit_mutex);

static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
				unsigned long long val)
{
	int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
	int progress;
	u64 memswlimit;
	int ret = 0;

	while (retry_count) {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		/*
		 * Rather than hide all in some function, I do this in
		 * open coded manner. You see what this really does.
		 * We have to guarantee mem->res.limit < mem->memsw.limit.
		 */
		mutex_lock(&set_limit_mutex);
		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
		if (memswlimit < val) {
			ret = -EINVAL;
			mutex_unlock(&set_limit_mutex);
			break;
		}
		ret = res_counter_set_limit(&memcg->res, val);
		mutex_unlock(&set_limit_mutex);

		if (!ret)
			break;

		progress = try_to_free_mem_cgroup_pages(memcg,
				GFP_KERNEL, false);
		if (!progress)
			retry_count--;
	}

	if (!ret)
		mem_cgroup_set_inactive_ratio(memcg);

	return ret;
}

int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
				unsigned long long val)
{
	int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
	u64 memlimit, oldusage, curusage;
	int ret;

	if (!do_swap_account)
		return -EINVAL;

	while (retry_count) {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		/*
		 * Rather than hide all in some function, I do this in
		 * open coded manner. You see what this really does.
		 * We have to guarantee mem->res.limit < mem->memsw.limit.
		 */
		mutex_lock(&set_limit_mutex);
		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
		if (memlimit > val) {
			ret = -EINVAL;
			mutex_unlock(&set_limit_mutex);
			break;
		}
		ret = res_counter_set_limit(&memcg->memsw, val);
		mutex_unlock(&set_limit_mutex);

		if (!ret)
			break;

		oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
		try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL, true);
		curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
		if (curusage >= oldusage)
			retry_count--;
	}
	return ret;
}

/*
 * This routine traverses page_cgroups in the given list and drops them all.
 * *And* this routine doesn't reclaim the page itself, just removes the
 * page_cgroup.
 */
static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
				int node, int zid, enum lru_list lru)
{
	struct zone *zone;
	struct mem_cgroup_per_zone *mz;
	struct page_cgroup *pc, *busy;
	unsigned long flags, loop;
	struct list_head *list;
	int ret = 0;

	zone = &NODE_DATA(node)->node_zones[zid];
	mz = mem_cgroup_zoneinfo(mem, node, zid);
	list = &mz->lists[lru];

	loop = MEM_CGROUP_ZSTAT(mz, lru);
	/* give some margin against EBUSY etc...*/
	loop += 256;
	busy = NULL;
	while (loop--) {
		ret = 0;
		spin_lock_irqsave(&zone->lru_lock, flags);
		if (list_empty(list)) {
			spin_unlock_irqrestore(&zone->lru_lock, flags);
			break;
		}
		pc = list_entry(list->prev, struct page_cgroup, lru);
		if (busy == pc) {
			list_move(&pc->lru, list);
			busy = NULL;
			spin_unlock_irqrestore(&zone->lru_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&zone->lru_lock, flags);

		ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
		if (ret == -ENOMEM)
			break;

		if (ret == -EBUSY || ret == -EINVAL) {
			/* found lock contention or "pc" is obsolete. */
			busy = pc;
			cond_resched();
		} else
			busy = NULL;
	}

	if (!ret && !list_empty(list))
		return -EBUSY;
	return ret;
}

/*
 * make mem_cgroup's charge to be 0 if there is no task.
 * This enables deleting this mem_cgroup.
 */
static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
{
	int ret;
	int node, zid, shrink;
	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct cgroup *cgrp = mem->css.cgroup;

	css_get(&mem->css);

	shrink = 0;
	/* should free all ? */
	if (free_all)
		goto try_to_free;
move_account:
	while (mem->res.usage > 0) {
		ret = -EBUSY;
		if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
			goto out;
		ret = -EINTR;
		if (signal_pending(current))
			goto out;
		/* This is for making all *used* pages to be on LRU. */
		lru_add_drain_all();
		ret = 0;
		for_each_node_state(node, N_POSSIBLE) {
			for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
				enum lru_list l;
				for_each_lru(l) {
					ret = mem_cgroup_force_empty_list(mem,
							node, zid, l);
					if (ret)
						break;
				}
			}
			if (ret)
				break;
		}
		/* it seems parent cgroup doesn't have enough mem */
		if (ret == -ENOMEM)
			goto try_to_free;
		cond_resched();
	}
	ret = 0;
out:
	css_put(&mem->css);
	return ret;

try_to_free:
	/* returns EBUSY if there is a task or if we come here twice. */
	if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
		ret = -EBUSY;
		goto out;
	}
	/* we call try-to-free pages to make this cgroup empty */
	lru_add_drain_all();
	/* try to free all pages in this cgroup */
	shrink = 1;
	while (nr_retries && mem->res.usage > 0) {
		int progress;

		if (signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		progress = try_to_free_mem_cgroup_pages(mem,
						  GFP_KERNEL, false);
		if (!progress) {
			nr_retries--;
			/* maybe some writeback is necessary */
			congestion_wait(WRITE, HZ/10);
		}

	}
	lru_add_drain();
	/* try move_account...there may be some *locked* pages. */
	if (mem->res.usage)
		goto move_account;
	ret = 0;
	goto out;
}

int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
{
	return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
}


static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
{
	return mem_cgroup_from_cont(cont)->use_hierarchy;
}

static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
					u64 val)
{
	int retval = 0;
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
	struct cgroup *parent = cont->parent;
	struct mem_cgroup *parent_mem = NULL;

	if (parent)
		parent_mem = mem_cgroup_from_cont(parent);

	cgroup_lock();
	/*
	 * If parent's use_hierarchy is set, we can't make any modifications
	 * in the child subtrees. If it is unset, then the change can
	 * occur, provided the current cgroup has no children.
	 *
	 * For the root cgroup, parent_mem is NULL, we allow value to be
	 * set if there are no children.
	 */
	if ((!parent_mem || !parent_mem->use_hierarchy) &&
				(val == 1 || val == 0)) {
		if (list_empty(&cont->children))
			mem->use_hierarchy = val;
		else
			retval = -EBUSY;
	} else
		retval = -EINVAL;
	cgroup_unlock();

	return retval;
}

static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
	u64 val = 0;
	int type, name;

	type = MEMFILE_TYPE(cft->private);
	name = MEMFILE_ATTR(cft->private);
	switch (type) {
	case _MEM:
		val = res_counter_read_u64(&mem->res, name);
		break;
	case _MEMSWAP:
		if (do_swap_account)
			val = res_counter_read_u64(&mem->memsw, name);
		break;
	default:
		BUG();
		break;
	}
	return val;
}

/*
 * The user of this function is...
 * RES_LIMIT.
 */
static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
			    const char *buffer)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
	int type, name;
	unsigned long long val;
	int ret;

	type = MEMFILE_TYPE(cft->private);
	name = MEMFILE_ATTR(cft->private);
	switch (name) {
	case RES_LIMIT:
		/* This function does all necessary parse...reuse it */
		ret = res_counter_memparse_write_strategy(buffer, &val);
		if (ret)
			break;
		if (type == _MEM)
			ret = mem_cgroup_resize_limit(memcg, val);
		else
			ret = mem_cgroup_resize_memsw_limit(memcg, val);
		break;
	default:
		ret = -EINVAL; /* should be BUG() ? */
		break;
	}
	return ret;
}

static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
{
	struct mem_cgroup *mem;
	int type, name;

	mem = mem_cgroup_from_cont(cont);
	type = MEMFILE_TYPE(event);
	name = MEMFILE_ATTR(event);
	switch (name) {
	case RES_MAX_USAGE:
		if (type == _MEM)
			res_counter_reset_max(&mem->res);
		else
			res_counter_reset_max(&mem->memsw);
		break;
	case RES_FAILCNT:
		if (type == _MEM)
			res_counter_reset_failcnt(&mem->res);
		else
			res_counter_reset_failcnt(&mem->memsw);
		break;
	}
	return 0;
}

static const struct mem_cgroup_stat_desc {
	const char *msg;
	u64 unit;
} mem_cgroup_stat_desc[] = {
	[MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
	[MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
	[MEM_CGROUP_STAT_PGPGIN_COUNT] = {"pgpgin", 1, },
	[MEM_CGROUP_STAT_PGPGOUT_COUNT] = {"pgpgout", 1, },
};

static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
	struct mem_cgroup_stat *stat = &mem_cont->stat;
	int i;

	for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) {
		s64 val;

		val = mem_cgroup_read_stat(stat, i);
		val *= mem_cgroup_stat_desc[i].unit;
		cb->fill(cb, mem_cgroup_stat_desc[i].msg, val);
	}
	/* showing # of active pages */
	{
		unsigned long active_anon, inactive_anon;
		unsigned long active_file, inactive_file;
		unsigned long unevictable;

		inactive_anon = mem_cgroup_get_all_zonestat(mem_cont,
						LRU_INACTIVE_ANON);
		active_anon = mem_cgroup_get_all_zonestat(mem_cont,
						LRU_ACTIVE_ANON);
		inactive_file = mem_cgroup_get_all_zonestat(mem_cont,
						LRU_INACTIVE_FILE);
		active_file = mem_cgroup_get_all_zonestat(mem_cont,
						LRU_ACTIVE_FILE);
		unevictable = mem_cgroup_get_all_zonestat(mem_cont,
							LRU_UNEVICTABLE);

		cb->fill(cb, "active_anon", (active_anon) * PAGE_SIZE);
		cb->fill(cb, "inactive_anon", (inactive_anon) * PAGE_SIZE);
		cb->fill(cb, "active_file", (active_file) * PAGE_SIZE);
		cb->fill(cb, "inactive_file", (inactive_file) * PAGE_SIZE);
		cb->fill(cb, "unevictable", unevictable * PAGE_SIZE);
	}
	return 0;
}


static struct cftype mem_cgroup_files[] = {
	{
		.name = "usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
		.write_string = mem_cgroup_write,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "failcnt",
		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "stat",
		.read_map = mem_control_stat_show,
	},
	{
		.name = "force_empty",
		.trigger = mem_cgroup_force_empty_write,
	},
	{
		.name = "use_hierarchy",
		.write_u64 = mem_cgroup_hierarchy_write,
		.read_u64 = mem_cgroup_hierarchy_read,
	},
};

static struct cftype memsw_cgroup_files[] = {
	{
		.name = "memsw.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "memsw.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "memsw.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
		.write_string = mem_cgroup_write,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "memsw.failcnt",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
};

static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
{
	if (!do_swap_account)
		return 0;
	return cgroup_add_files(cont, ss, memsw_cgroup_files,
				ARRAY_SIZE(memsw_cgroup_files));
};
#else
static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
{
	return 0;
}
#endif

static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup_per_zone *mz;
	enum lru_list l;
	int zone, tmp = node;
	/*
	 * This routine is called against possible nodes.
	 * But it's BUG to call kmalloc() against offline node.
	 *
	 * TODO: this routine can waste much memory for nodes which will
	 *       never be onlined. It's better to use memory hotplug callback
	 *       function.
	 */
	if (!node_state(node, N_NORMAL_MEMORY))
		tmp = -1;
	pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
	if (!pn)
		return 1;

	mem->info.nodeinfo[node] = pn;
	memset(pn, 0, sizeof(*pn));

	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
		mz = &pn->zoneinfo[zone];
		for_each_lru(l)
			INIT_LIST_HEAD(&mz->lists[l]);
	}
	return 0;
}

static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
	kfree(mem->info.nodeinfo[node]);
}
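
/*
 * The per-CPU statistics array is allocated together with the mem_cgroup
 * itself, which is why "stat" must stay the last member of struct
 * mem_cgroup (see the struct definition above).
 */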

static int mem_cgroup_size(void)
{
	int cpustat_size = nr_cpu_ids * sizeof(struct mem_cgroup_stat_cpu);
	return sizeof(struct mem_cgroup) + cpustat_size;
}

static struct mem_cgroup *mem_cgroup_alloc(void)
{
	struct mem_cgroup *mem;
	int size = mem_cgroup_size();

	if (size < PAGE_SIZE)
		mem = kmalloc(size, GFP_KERNEL);
	else
		mem = vmalloc(size);

	if (mem)
		memset(mem, 0, size);
	return mem;
}

/*
 * At destroying mem_cgroup, references from swap_cgroup can remain.
 * (scanning all at force_empty is too costly...)
 *
 * Instead of clearing all references at force_empty, we remember
 * the number of references from swap_cgroup and free the mem_cgroup when
 * it goes down to 0.
 *
 * When a mem_cgroup is destroyed, mem->obsolete will be set to 1, and any
 * entry which points to this memcg will be ignored at swapin.
 *
 * Removal of cgroup itself succeeds regardless of refs from swap.
 */

static void mem_cgroup_free(struct mem_cgroup *mem)
{
	int node;

	if (atomic_read(&mem->refcnt) > 0)
		return;

	for_each_node_state(node, N_POSSIBLE)
		free_mem_cgroup_per_zone_info(mem, node);

	if (mem_cgroup_size() < PAGE_SIZE)
		kfree(mem);
	else
		vfree(mem);
}

static void mem_cgroup_get(struct mem_cgroup *mem)
{
	atomic_inc(&mem->refcnt);
}

static void mem_cgroup_put(struct mem_cgroup *mem)
{
	if (atomic_dec_and_test(&mem->refcnt)) {
		if (!mem->obsolete)
			return;
		mem_cgroup_free(mem);
	}
}

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
static void __init enable_swap_cgroup(void)
{
	if (!mem_cgroup_disabled() && really_do_swap_account)
		do_swap_account = 1;
}
#else
static void __init enable_swap_cgroup(void)
{
}
#endif

static struct cgroup_subsys_state *
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct mem_cgroup *mem, *parent;
	int node;

	mem = mem_cgroup_alloc();
	if (!mem)
		return ERR_PTR(-ENOMEM);
	for_each_node_state(node, N_POSSIBLE)
		if (alloc_mem_cgroup_per_zone_info(mem, node))
			goto free_out;
	/* root ? */
	if (cont->parent == NULL) {
		enable_swap_cgroup();
		parent = NULL;
	} else {
		parent = mem_cgroup_from_cont(cont->parent);
		mem->use_hierarchy = parent->use_hierarchy;
	}

	if (parent && parent->use_hierarchy) {
		res_counter_init(&mem->res, &parent->res);
		res_counter_init(&mem->memsw, &parent->memsw);
	} else {
		res_counter_init(&mem->res, NULL);
		res_counter_init(&mem->memsw, NULL);
	}
	mem_cgroup_set_inactive_ratio(mem);
	mem->last_scanned_child = NULL;

	return &mem->css;
free_out:
	for_each_node_state(node, N_POSSIBLE)
		free_mem_cgroup_per_zone_info(mem, node);
	mem_cgroup_free(mem);
	return ERR_PTR(-ENOMEM);
}

static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
					struct cgroup *cont)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
	mem->obsolete = 1;
	mem_cgroup_force_empty(mem, false);
}

static void mem_cgroup_destroy(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	mem_cgroup_free(mem_cgroup_from_cont(cont));
}

static int mem_cgroup_populate(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	int ret;

	ret = cgroup_add_files(cont, ss, mem_cgroup_files,
				ARRAY_SIZE(mem_cgroup_files));

	if (!ret)
		ret = register_memsw_files(cont, ss);
	return ret;
}

static void mem_cgroup_move_task(struct cgroup_subsys *ss,
				struct cgroup *cont,
				struct cgroup *old_cont,
				struct task_struct *p)
{
	/*
	 * FIXME: It's better to move charges of this process from old
	 * memcg to new memcg. But it's just on TODO-List now.
	 */
}

struct cgroup_subsys mem_cgroup_subsys = {
	.name = "memory",
	.subsys_id = mem_cgroup_subsys_id,
	.create = mem_cgroup_create,
	.pre_destroy = mem_cgroup_pre_destroy,
	.destroy = mem_cgroup_destroy,
	.populate = mem_cgroup_populate,
	.attach = mem_cgroup_move_task,
	.early_init = 0,
};

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP

static int __init disable_swap_account(char *s)
{
	really_do_swap_account = 0;
	return 1;
}
__setup("noswapaccount", disable_swap_account);
#endif