/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#include <asm/uaccess.h>

struct cgroup_subsys mem_cgroup_subsys __read_mostly;
static struct kmem_cache *page_cgroup_cache __read_mostly;
#define MEM_CGROUP_RECLAIM_RETRIES	5

/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE, 	   /* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,	   /* # of pages charged as rss */
	MEM_CGROUP_STAT_PGPGIN_COUNT,	/* # of pages paged in */
	MEM_CGROUP_STAT_PGPGOUT_COUNT,	/* # of pages paged out */

	MEM_CGROUP_STAT_NSTATS,
};

struct mem_cgroup_stat_cpu {
	s64 count[MEM_CGROUP_STAT_NSTATS];
} ____cacheline_aligned_in_smp;

struct mem_cgroup_stat {
	struct mem_cgroup_stat_cpu cpustat[NR_CPUS];
};

/*
 * For accounting under irq disable, there is no need to increment the
 * preempt count.
 */
static void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat *stat,
		enum mem_cgroup_stat_index idx, int val)
{
	int cpu = smp_processor_id();
	stat->cpustat[cpu].count[idx] += val;
}

static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
		enum mem_cgroup_stat_index idx)
{
	int cpu;
	s64 ret = 0;
	for_each_possible_cpu(cpu)
		ret += stat->cpustat[cpu].count[idx];
	return ret;
}
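
/*
 * Usage sketch (illustrative, not a real call site): callers of the
 * add helper already hold mz->lru_lock with irqs disabled, which is
 * what makes the unlocked per-cpu add above safe:
 *
 *	spin_lock_irqsave(&mz->lru_lock, flags);
 *	__mem_cgroup_stat_add_safe(&mem->stat, MEM_CGROUP_STAT_RSS, 1);
 *	spin_unlock_irqrestore(&mz->lru_lock, flags);
 *
 * mem_cgroup_read_stat() simply sums the per-cpu counters over all
 * possible cpus, so a read may race with concurrent updates; the
 * result is a best-effort snapshot.
 */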

/*
 * per-zone information in memory controller.
 */

enum mem_cgroup_zstat_index {
	MEM_CGROUP_ZSTAT_ACTIVE,
	MEM_CGROUP_ZSTAT_INACTIVE,

	NR_MEM_CGROUP_ZSTAT,
};

struct mem_cgroup_per_zone {
	/*
	 * spin_lock to protect the per cgroup LRU
	 */
	spinlock_t		lru_lock;
	struct list_head	active_list;
	struct list_head	inactive_list;
	unsigned long count[NR_MEM_CGROUP_ZSTAT];
};
/* Macro for accessing counter */
#define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

struct mem_cgroup_lru_info {
	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
};

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;
	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
	 */
	struct mem_cgroup_lru_info info;

	int	prev_priority;	/* for recording reclaim priority */
	/*
	 * statistics.
	 */
	struct mem_cgroup_stat stat;
};

static struct mem_cgroup init_mem_cgroup;

/*
 * We use the lower bit of the page->page_cgroup pointer as a bit spin
 * lock.  We need to ensure that page->page_cgroup is at least two
 * byte aligned (based on comments from Nick Piggin).  But since
 * bit_spin_lock doesn't actually set that lock bit in a non-debug
 * uniprocessor kernel, we should avoid setting it here too.
 */
#define PAGE_CGROUP_LOCK_BIT 	0x0
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
#define PAGE_CGROUP_LOCK 	(1 << PAGE_CGROUP_LOCK_BIT)
#else
#define PAGE_CGROUP_LOCK	0x0
#endif
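
/*
 * Worked example of the encoding above (illustrative addresses): a
 * struct page_cgroup at 0xffff880012345670 is at least two-byte
 * aligned, so bit 0 of the pointer is always clear.  While locked on
 * SMP (or DEBUG_SPINLOCK), page->page_cgroup holds
 * 0xffff880012345671; masking off PAGE_CGROUP_LOCK, as
 * page_get_page_cgroup() does below, recovers the pointer.  On a
 * non-debug UP kernel PAGE_CGROUP_LOCK is 0 and the bare pointer is
 * stored.
 */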
/*
 * A page_cgroup page is associated with every page descriptor. The
 * page_cgroup helps us identify information about the cgroup
 */
struct page_cgroup {
	struct list_head lru;		/* per cgroup LRU list */
	struct page *page;
	struct mem_cgroup *mem_cgroup;
	int flags;
};
#define PAGE_CGROUP_FLAG_CACHE	(0x1)	/* charged as cache */
#define PAGE_CGROUP_FLAG_ACTIVE (0x2)	/* page is active in this cgroup */

static int page_cgroup_nid(struct page_cgroup *pc)
{
	return page_to_nid(pc->page);
}

static enum zone_type page_cgroup_zid(struct page_cgroup *pc)
{
	return page_zonenum(pc->page);
}

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_MAPPED,
	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
};

/*
 * Always modified under the lru lock, so there is no need to
 * preempt_disable().
 */
static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags,
					bool charge)
{
	int val = (charge)? 1 : -1;
	struct mem_cgroup_stat *stat = &mem->stat;

	VM_BUG_ON(!irqs_disabled());
	if (flags & PAGE_CGROUP_FLAG_CACHE)
		__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_CACHE, val);
	else
		__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_RSS, val);

	if (charge)
		__mem_cgroup_stat_add_safe(stat,
				MEM_CGROUP_STAT_PGPGIN_COUNT, 1);
	else
		__mem_cgroup_stat_add_safe(stat,
				MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
}

static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
{
	return &mem->info.nodeinfo[nid]->zoneinfo[zid];
}

static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct page_cgroup *pc)
{
	struct mem_cgroup *mem = pc->mem_cgroup;
	int nid = page_cgroup_nid(pc);
	int zid = page_cgroup_zid(pc);
	return mem_cgroup_zoneinfo(mem, nid, zid);
}

static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
					enum mem_cgroup_zstat_index idx)
{
	int nid, zid;
	struct mem_cgroup_per_zone *mz;
	u64 total = 0;

	for_each_online_node(nid)
		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
			mz = mem_cgroup_zoneinfo(mem, nid, zid);
			total += MEM_CGROUP_ZSTAT(mz, idx);
		}
	return total;
}

static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
	return container_of(cgroup_subsys_state(cont,
				mem_cgroup_subsys_id), struct mem_cgroup,
				css);
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
				struct mem_cgroup, css);
}

static inline int page_cgroup_locked(struct page *page)
{
	return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)
{
	VM_BUG_ON(!page_cgroup_locked(page));
	page->page_cgroup = ((unsigned long)pc | PAGE_CGROUP_LOCK);
}

struct page_cgroup *page_get_page_cgroup(struct page *page)
{
	return (struct page_cgroup *) (page->page_cgroup & ~PAGE_CGROUP_LOCK);
}

static void lock_page_cgroup(struct page *page)
{
	bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static int try_lock_page_cgroup(struct page *page)
{
	return bit_spin_trylock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static void unlock_page_cgroup(struct page *page)
{
	bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz,
			struct page_cgroup *pc)
{
	int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;

	if (from)
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
	else
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;

	mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, false);
	list_del(&pc->lru);
}

static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz,
				struct page_cgroup *pc)
{
	int to = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;

	if (!to) {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
		list_add(&pc->lru, &mz->inactive_list);
	} else {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
		list_add(&pc->lru, &mz->active_list);
	}
	mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, true);
}

static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
{
	int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);

	if (from)
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
	else
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;

	if (active) {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
		pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
		list_move(&pc->lru, &mz->active_list);
	} else {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
		pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
		list_move(&pc->lru, &mz->inactive_list);
	}
}

int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
{
	int ret;

	task_lock(task);
	ret = task->mm && mm_match_cgroup(task->mm, mem);
	task_unlock(task);
	return ret;
}

/*
 * This routine assumes that the appropriate zone's lru lock is already held
 */
void mem_cgroup_move_lists(struct page *page, bool active)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;
	unsigned long flags;

	/*
	 * We cannot lock_page_cgroup while holding zone's lru_lock,
	 * because other holders of lock_page_cgroup can be interrupted
	 * with an attempt to rotate_reclaimable_page.  But we cannot
	 * safely get to page_cgroup without it, so just try_lock it:
	 * mem_cgroup_isolate_pages allows for page left on wrong list.
	 */
	if (!try_lock_page_cgroup(page))
		return;

	pc = page_get_page_cgroup(page);
	if (pc) {
		mz = page_cgroup_zoneinfo(pc);
		spin_lock_irqsave(&mz->lru_lock, flags);
		__mem_cgroup_move_lists(pc, active);
		spin_unlock_irqrestore(&mz->lru_lock, flags);
	}
	unlock_page_cgroup(page);
}

/*
 * Calculate mapped_ratio under the memory controller. This will be used
 * in vmscan.c for determining whether we have to reclaim mapped pages.
 */
int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
{
	long total, rss;

	/*
	 * usage is recorded in bytes. But, here, we assume the number of
	 * physical pages can be represented by "long" on any arch.
	 */
	total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
	rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
	return (int)((rss * 100L) / total);
}
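
/*
 * Worked example (illustrative numbers): with usage = 64MB and 4KB
 * pages, total = (64MB >> PAGE_SHIFT) + 1 = 16385. If 8192 of those
 * pages are charged as rss, the function returns
 * (8192 * 100) / 16385 = 49, i.e. roughly half of this cgroup's usage
 * is mapped.
 */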
/*
 * This function is called from the page reclaim loop in vmscan.c, where
 * the balance between the active and inactive lists is calculated. For
 * memory controller page reclaim, we should use the mem_cgroup's
 * imbalance rather than the zone's global lru imbalance.
 */
long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
{
	unsigned long active, inactive;
	/* active and inactive are the number of pages. 'long' is ok.*/
	active = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_ACTIVE);
	inactive = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_INACTIVE);
	return (long) (active / (inactive + 1));
}
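
/*
 * Example (illustrative): active = 3000 and inactive = 999 gives
 * 3000 / (999 + 1) = 3, meaning the active list is about three times
 * the size of the inactive list; the "+ 1" merely avoids dividing by
 * zero when the inactive list is empty.
 */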
/*
 * prev_priority control... this will be used in the memory reclaim path.
 */
int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	return mem->prev_priority;
}

void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
{
	if (priority < mem->prev_priority)
		mem->prev_priority = priority;
}

void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
{
	mem->prev_priority = priority;
}

/*
 * Calculate # of pages to be scanned in this priority/zone.
 * See also vmscan.c
 *
 * priority starts from "DEF_PRIORITY" and decremented in each loop.
 * (see include/linux/mmzone.h)
 */

long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
				   struct zone *zone, int priority)
{
	long nr_active;
	int nid = zone->zone_pgdat->node_id;
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);

	nr_active = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE);
	return (nr_active >> priority);
}

long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
					struct zone *zone, int priority)
{
	long nr_inactive;
	int nid = zone->zone_pgdat->node_id;
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);

	nr_inactive = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE);
	return (nr_inactive >> priority);
}
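
/*
 * Example (illustrative): with DEF_PRIORITY == 12 and 40960 inactive
 * pages charged to this zone, the first reclaim pass asks for
 * 40960 >> 12 = 10 pages; each time the priority drops by one, the
 * scan target doubles.
 */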

unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active)
{
	unsigned long nr_taken = 0;
	struct page *page;
	unsigned long scan;
	LIST_HEAD(pc_list);
	struct list_head *src;
	struct page_cgroup *pc, *tmp;
	int nid = z->zone_pgdat->node_id;
	int zid = zone_idx(z);
	struct mem_cgroup_per_zone *mz;

	BUG_ON(!mem_cont);
478
	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
479
	if (active)
480
		src = &mz->active_list;
481
	else
482 483
		src = &mz->inactive_list;

484

485
	spin_lock(&mz->lru_lock);
486 487
	scan = 0;
	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
		if (scan >= nr_to_scan)
			break;
		page = pc->page;

		if (unlikely(!PageLRU(page)))
			continue;

		if (PageActive(page) && !active) {
			__mem_cgroup_move_lists(pc, true);
			continue;
		}
		if (!PageActive(page) && active) {
			__mem_cgroup_move_lists(pc, false);
			continue;
		}

		scan++;
		list_move(&pc->lru, &pc_list);

		if (__isolate_lru_page(page, mode) == 0) {
			list_move(&page->lru, dst);
			nr_taken++;
		}
	}

	list_splice(&pc_list, src);
	spin_unlock(&mz->lru_lock);

	*scanned = scan;
	return nr_taken;
}

/*
 * Charge the memory controller for page usage.
 * Return
 * 0 if the charge was successful
 * < 0 if the cgroup is over its limit
 */
static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask, enum charge_type ctype,
				struct mem_cgroup *memcg)
{
	struct mem_cgroup *mem;
	struct page_cgroup *pc;
	unsigned long flags;
	unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_subsys.disabled)
		return 0;

	/*
	 * Should page_cgroups go to their own slab?
	 * One could optimize the performance of the charging routine
	 * by saving a bit in the page_flags and using it as a lock
	 * to see if the cgroup page already has a page_cgroup associated
	 * with it
	 */
retry:
	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	/*
	 * The page_cgroup exists and
	 * the page has already been accounted.
	 */
	if (pc) {
		VM_BUG_ON(pc->page != page);
		VM_BUG_ON(!pc->mem_cgroup);
		unlock_page_cgroup(page);
		goto done;
	}
	unlock_page_cgroup(page);

	pc = kmem_cache_alloc(page_cgroup_cache, gfp_mask);
	if (pc == NULL)
		goto err;

	/*
566 567
	 * We always charge the cgroup the mm_struct belongs to.
	 * The mm_struct's mem_cgroup changes on task migration if the
568 569 570
	 * thread group leader migrates. It's possible that mm is not
	 * set, if so charge the init_mm (happens for pagecache usage).
	 */
571
	if (likely(!memcg)) {
572 573 574 575 576 577 578 579 580 581 582
		rcu_read_lock();
		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
		/*
		 * For every charge from the cgroup, increment reference count
		 */
		css_get(&mem->css);
		rcu_read_unlock();
	} else {
		mem = memcg;
		css_get(&memcg->css);
	}

	while (res_counter_charge(&mem->res, PAGE_SIZE)) {
		if (!(gfp_mask & __GFP_WAIT))
			goto out;

		if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
			continue;

		/*
		 * try_to_free_mem_cgroup_pages() might not give us a full
		 * picture of reclaim. Some pages are reclaimed and might be
		 * moved to swap cache or just unmapped from the cgroup.
		 * Check the limit again to see if the reclaim reduced the
		 * current usage of the cgroup before giving up
		 */
		if (res_counter_check_under_limit(&mem->res))
			continue;

		if (!nr_retries--) {
			mem_cgroup_out_of_memory(mem, gfp_mask);
			goto out;
		}
	}

	pc->mem_cgroup = mem;
	pc->page = page;
	/*
	 * If a page is accounted as page cache, insert it into the
	 * inactive list. If anon, insert it into the active list.
	 */
	if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
		pc->flags = PAGE_CGROUP_FLAG_CACHE;
	else
		pc->flags = PAGE_CGROUP_FLAG_ACTIVE;

	lock_page_cgroup(page);
	if (page_get_page_cgroup(page)) {
		unlock_page_cgroup(page);
		/*
		 * Another charge has been added to this page already.
		 * We take lock_page_cgroup(page) again and read
		 * page->cgroup, increment refcnt.... just retrying is OK.
		 */
		res_counter_uncharge(&mem->res, PAGE_SIZE);
		css_put(&mem->css);
		kmem_cache_free(page_cgroup_cache, pc);
		goto retry;
	}
	page_assign_page_cgroup(page, pc);

	mz = page_cgroup_zoneinfo(pc);
	spin_lock_irqsave(&mz->lru_lock, flags);
	__mem_cgroup_add_list(mz, pc);
	spin_unlock_irqrestore(&mz->lru_lock, flags);

	unlock_page_cgroup(page);
done:
	return 0;
out:
	css_put(&mem->css);
	kmem_cache_free(page_cgroup_cache, pc);
err:
	return -ENOMEM;
}

int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
{
	/*
	 * If already mapped, we don't have to account.
	 * If page cache, page->mapping has the address_space.
	 * But page->mapping may hold a stale anon_vma pointer;
	 * detect that with a PageAnon() check: a newly mapped
	 * anonymous page's page->mapping is NULL.
	 */
	if (page_mapped(page) || (page->mapping && !PageAnon(page)))
		return 0;
	if (unlikely(!mm))
		mm = &init_mm;
	return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
}

int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask)
{
	if (unlikely(!mm))
		mm = &init_mm;
	return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
}

/*
 * uncharge if !page_mapped(page)
 */
static void
__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem;
	struct mem_cgroup_per_zone *mz;
	unsigned long flags;

	if (mem_cgroup_subsys.disabled)
		return;

	/*
	 * Check if our page_cgroup is valid
	 */
	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	if (!pc)
		goto unlock;

	VM_BUG_ON(pc->page != page);

	if ((ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
	    && ((pc->flags & PAGE_CGROUP_FLAG_CACHE)
		|| page_mapped(page)))
		goto unlock;
	mz = page_cgroup_zoneinfo(pc);
	spin_lock_irqsave(&mz->lru_lock, flags);
	__mem_cgroup_remove_list(mz, pc);
	spin_unlock_irqrestore(&mz->lru_lock, flags);

	page_assign_page_cgroup(page, NULL);
	unlock_page_cgroup(page);

	mem = pc->mem_cgroup;
	res_counter_uncharge(&mem->res, PAGE_SIZE);
	css_put(&mem->css);
	kmem_cache_free(page_cgroup_cache, pc);
	return;
unlock:
	unlock_page_cgroup(page);
}

void mem_cgroup_uncharge_page(struct page *page)
{
	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
}

void mem_cgroup_uncharge_cache_page(struct page *page)
{
	VM_BUG_ON(page_mapped(page));
	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
}
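
/*
 * Typical pairing (a sketch; the real call sites live in the core mm
 * code, e.g. mm/memory.c and mm/filemap.c): anonymous faults call
 * mem_cgroup_charge() and are undone by mem_cgroup_uncharge_page() at
 * unmap time, while page cache insertions call
 * mem_cgroup_cache_charge() and are undone by
 * mem_cgroup_uncharge_cache_page() on removal from the radix tree.
 */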

/*
 * Before starting migration, account against the new page.
 */
int mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem = NULL;
	enum charge_type ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
	int ret = 0;

	if (mem_cgroup_subsys.disabled)
		return 0;

	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	if (pc) {
		mem = pc->mem_cgroup;
		css_get(&mem->css);
		if (pc->flags & PAGE_CGROUP_FLAG_CACHE)
			ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
	}
	unlock_page_cgroup(page);
	if (mem) {
		ret = mem_cgroup_charge_common(newpage, NULL, GFP_KERNEL,
			ctype, mem);
		css_put(&mem->css);
	}
	return ret;
}

/* remove redundant charge if migration failed */
void mem_cgroup_end_migration(struct page *newpage)
{
	/*
	 * On success, page->mapping is not NULL.
	 * Special rollback care is necessary when
	 * 1. migration fails (newpage->mapping is cleared in this case), or
	 * 2. the newpage was moved but not remapped again because the task
	 *    exits and the newpage is obsolete. In this case, the new page
	 *    may be a swapcache. So, we always call mem_cgroup_uncharge_page()
	 *    to avoid a mess; the page_cgroup will be removed if it is
	 *    unnecessary. File cache pages are still on the radix tree and
	 *    need no care here.
	 */
	if (!newpage->mapping)
		__mem_cgroup_uncharge_common(newpage,
					 MEM_CGROUP_CHARGE_TYPE_FORCE);
	else if (PageAnon(newpage))
		mem_cgroup_uncharge_page(newpage);
}

/*
 * This routine traverses the page_cgroups on the given list and drops them
 * all. *And* this routine doesn't reclaim the pages themselves; it just
 * removes the page_cgroups.
 */
#define FORCE_UNCHARGE_BATCH	(128)
static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
			    struct mem_cgroup_per_zone *mz,
			    int active)
{
	struct page_cgroup *pc;
	struct page *page;
	int count = FORCE_UNCHARGE_BATCH;
	unsigned long flags;
	struct list_head *list;

	if (active)
		list = &mz->active_list;
	else
		list = &mz->inactive_list;

	spin_lock_irqsave(&mz->lru_lock, flags);
	while (!list_empty(list)) {
		pc = list_entry(list->prev, struct page_cgroup, lru);
		page = pc->page;
		get_page(page);
		spin_unlock_irqrestore(&mz->lru_lock, flags);
		/*
		 * Check if this page is on LRU. !LRU page can be found
		 * if it's under page migration.
		 */
		if (PageLRU(page)) {
			__mem_cgroup_uncharge_common(page,
					MEM_CGROUP_CHARGE_TYPE_FORCE);
			put_page(page);
			if (--count <= 0) {
				count = FORCE_UNCHARGE_BATCH;
				cond_resched();
			}
		} else
			cond_resched();
		spin_lock_irqsave(&mz->lru_lock, flags);
	}
	spin_unlock_irqrestore(&mz->lru_lock, flags);
}

/*
 * Make the mem_cgroup's charge 0 if there is no task.
 * This enables deleting this mem_cgroup.
 */
static int mem_cgroup_force_empty(struct mem_cgroup *mem)
{
	int ret = -EBUSY;
	int node, zid;

	if (mem_cgroup_subsys.disabled)
		return 0;

	css_get(&mem->css);
	/*
	 * page reclaim code (kswapd etc..) will move pages between
	 * active_list <-> inactive_list while we don't take a lock.
	 * So, we have to loop here until all lists are empty.
	 */
	while (mem->res.usage > 0) {
		if (atomic_read(&mem->css.cgroup->count) > 0)
			goto out;
		for_each_node_state(node, N_POSSIBLE)
			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
				struct mem_cgroup_per_zone *mz;
				mz = mem_cgroup_zoneinfo(mem, node, zid);
				/* drop all page_cgroup in active_list */
				mem_cgroup_force_empty_list(mem, mz, 1);
				/* drop all page_cgroup in inactive_list */
				mem_cgroup_force_empty_list(mem, mz, 0);
			}
	}
	ret = 0;
out:
	css_put(&mem->css);
	return ret;
}

static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
{
	return res_counter_read_u64(&mem_cgroup_from_cont(cont)->res,
				    cft->private);
}

static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
			    const char *buffer)
{
	return res_counter_write(&mem_cgroup_from_cont(cont)->res,
				 cft->private, buffer,
				 res_counter_memparse_write_strategy);
}

static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
{
	struct mem_cgroup *mem;

	mem = mem_cgroup_from_cont(cont);
	switch (event) {
	case RES_MAX_USAGE:
		res_counter_reset_max(&mem->res);
		break;
	case RES_FAILCNT:
		res_counter_reset_failcnt(&mem->res);
		break;
	}
	return 0;
}

static int mem_force_empty_write(struct cgroup *cont, unsigned int event)
{
	return mem_cgroup_force_empty(mem_cgroup_from_cont(cont));
}

static const struct mem_cgroup_stat_desc {
	const char *msg;
	u64 unit;
} mem_cgroup_stat_desc[] = {
	[MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
	[MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
	[MEM_CGROUP_STAT_PGPGIN_COUNT] = {"pgpgin", 1, },
	[MEM_CGROUP_STAT_PGPGOUT_COUNT] = {"pgpgout", 1, },
};

static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
	struct mem_cgroup_stat *stat = &mem_cont->stat;
	int i;

	for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) {
		s64 val;

		val = mem_cgroup_read_stat(stat, i);
		val *= mem_cgroup_stat_desc[i].unit;
		cb->fill(cb, mem_cgroup_stat_desc[i].msg, val);
	}
	/* showing # of active pages */
	{
		unsigned long active, inactive;

		inactive = mem_cgroup_get_all_zonestat(mem_cont,
						MEM_CGROUP_ZSTAT_INACTIVE);
		active = mem_cgroup_get_all_zonestat(mem_cont,
						MEM_CGROUP_ZSTAT_ACTIVE);
		cb->fill(cb, "active", (active) * PAGE_SIZE);
		cb->fill(cb, "inactive", (inactive) * PAGE_SIZE);
	}
	return 0;
}

static struct cftype mem_cgroup_files[] = {
	{
		.name = "usage_in_bytes",
		.private = RES_USAGE,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "max_usage_in_bytes",
		.private = RES_MAX_USAGE,
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "limit_in_bytes",
		.private = RES_LIMIT,
		.write_string = mem_cgroup_write,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "failcnt",
		.private = RES_FAILCNT,
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "force_empty",
		.trigger = mem_force_empty_write,
	},
	{
		.name = "stat",
		.read_map = mem_control_stat_show,
	},
};
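
/*
 * Example interaction with the files above (illustrative, assuming the
 * controller is mounted): reading memory.usage_in_bytes returns
 * res.usage via mem_cgroup_read(); writing a string such as "4M" to
 * memory.limit_in_bytes is parsed by
 * res_counter_memparse_write_strategy() through mem_cgroup_write();
 * and writing to memory.force_empty triggers mem_force_empty_write(),
 * which drops all charges provided no task remains in the group.
 */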

static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup_per_zone *mz;
	int zone, tmp = node;
	/*
	 * This routine is called against possible nodes.
	 * But it's a BUG to call kmalloc() against an offline node.
	 *
	 * TODO: this routine can waste much memory for nodes which will
	 *       never be onlined. It's better to use memory hotplug callback
	 *       function.
	 */
	if (!node_state(node, N_NORMAL_MEMORY))
		tmp = -1;
	pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
	if (!pn)
		return 1;

	mem->info.nodeinfo[node] = pn;
	memset(pn, 0, sizeof(*pn));

	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
		mz = &pn->zoneinfo[zone];
		INIT_LIST_HEAD(&mz->active_list);
		INIT_LIST_HEAD(&mz->inactive_list);
		spin_lock_init(&mz->lru_lock);
	}
	return 0;
}

static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
	kfree(mem->info.nodeinfo[node]);
}

static struct mem_cgroup *mem_cgroup_alloc(void)
{
	struct mem_cgroup *mem;

	if (sizeof(*mem) < PAGE_SIZE)
		mem = kmalloc(sizeof(*mem), GFP_KERNEL);
	else
		mem = vmalloc(sizeof(*mem));

	if (mem)
		memset(mem, 0, sizeof(*mem));
	return mem;
}

static void mem_cgroup_free(struct mem_cgroup *mem)
{
	if (sizeof(*mem) < PAGE_SIZE)
		kfree(mem);
	else
		vfree(mem);
}
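
/*
 * The PAGE_SIZE check above matters because struct mem_cgroup embeds
 * one cacheline-aligned mem_cgroup_stat_cpu per possible cpu: with a
 * large NR_CPUS the structure easily exceeds a page, and vmalloc()
 * avoids depending on a high-order contiguous allocation that may be
 * unavailable.
 */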


static struct cgroup_subsys_state *
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct mem_cgroup *mem;
	int node;

	if (unlikely((cont->parent) == NULL)) {
		mem = &init_mem_cgroup;
		page_cgroup_cache = KMEM_CACHE(page_cgroup, SLAB_PANIC);
	} else {
		mem = mem_cgroup_alloc();
		if (!mem)
			return ERR_PTR(-ENOMEM);
	}

	res_counter_init(&mem->res);

	for_each_node_state(node, N_POSSIBLE)
		if (alloc_mem_cgroup_per_zone_info(mem, node))
			goto free_out;

	return &mem->css;
free_out:
	for_each_node_state(node, N_POSSIBLE)
		free_mem_cgroup_per_zone_info(mem, node);
	if (cont->parent != NULL)
		mem_cgroup_free(mem);
	return ERR_PTR(-ENOMEM);
}

static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
					struct cgroup *cont)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
	mem_cgroup_force_empty(mem);
}

static void mem_cgroup_destroy(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	int node;
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);

	for_each_node_state(node, N_POSSIBLE)
		free_mem_cgroup_per_zone_info(mem, node);

	mem_cgroup_free(mem_cgroup_from_cont(cont));
}

static int mem_cgroup_populate(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	if (mem_cgroup_subsys.disabled)
		return 0;
	return cgroup_add_files(cont, ss, mem_cgroup_files,
					ARRAY_SIZE(mem_cgroup_files));
}

static void mem_cgroup_move_task(struct cgroup_subsys *ss,
				struct cgroup *cont,
				struct cgroup *old_cont,
				struct task_struct *p)
{
	struct mm_struct *mm;
	struct mem_cgroup *mem, *old_mem;

	if (mem_cgroup_subsys.disabled)
		return;

	mm = get_task_mm(p);
	if (mm == NULL)
		return;

	mem = mem_cgroup_from_cont(cont);
	old_mem = mem_cgroup_from_cont(old_cont);

	if (mem == old_mem)
		goto out;

	/*
	 * Only thread group leaders are allowed to migrate, the mm_struct is
	 * in effect owned by the leader
	 */
	if (!thread_group_leader(p))
		goto out;

out:
	mmput(mm);
}

struct cgroup_subsys mem_cgroup_subsys = {
	.name = "memory",
	.subsys_id = mem_cgroup_subsys_id,
	.create = mem_cgroup_create,
	.pre_destroy = mem_cgroup_pre_destroy,
	.destroy = mem_cgroup_destroy,
	.populate = mem_cgroup_populate,
	.attach = mem_cgroup_move_task,
	.early_init = 0,
};