/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/swap.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>

#include <asm/uaccess.h>

struct cgroup_subsys mem_cgroup_subsys;
static const int MEM_CGROUP_RECLAIM_RETRIES = 5;

/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE, 	   /* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,	   /* # of pages charged as rss */

	MEM_CGROUP_STAT_NSTATS,
};

struct mem_cgroup_stat_cpu {
	s64 count[MEM_CGROUP_STAT_NSTATS];
} ____cacheline_aligned_in_smp;

struct mem_cgroup_stat {
	struct mem_cgroup_stat_cpu cpustat[NR_CPUS];
};

/*
 * For accounting under irq disable, there is no need to increment the
 * preempt count.
 */
static void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat *stat,
		enum mem_cgroup_stat_index idx, int val)
{
	int cpu = smp_processor_id();
	stat->cpustat[cpu].count[idx] += val;
}

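/* Sum the given statistic over all possible CPUs. */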
static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
		enum mem_cgroup_stat_index idx)
{
	int cpu;
	s64 ret = 0;
	for_each_possible_cpu(cpu)
		ret += stat->cpustat[cpu].count[idx];
	return ret;
}

/*
 * per-zone information in memory controller.
 */

enum mem_cgroup_zstat_index {
	MEM_CGROUP_ZSTAT_ACTIVE,
	MEM_CGROUP_ZSTAT_INACTIVE,

	NR_MEM_CGROUP_ZSTAT,
};

struct mem_cgroup_per_zone {
	/*
	 * spin_lock to protect the per cgroup LRU
	 */
	spinlock_t		lru_lock;
	struct list_head	active_list;
	struct list_head	inactive_list;
	unsigned long count[NR_MEM_CGROUP_ZSTAT];
};
/* Macro for accessing counter */
#define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

struct mem_cgroup_lru_info {
	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
};

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;
	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
	 */
	struct mem_cgroup_lru_info info;

	int	prev_priority;	/* for recording reclaim priority */
	/*
	 * statistics.
	 */
	struct mem_cgroup_stat stat;
};
static struct mem_cgroup init_mem_cgroup;

/*
 * We use the lower bit of the page->page_cgroup pointer as a bit spin
 * lock.  We need to ensure that page->page_cgroup is at least two
 * byte aligned (based on comments from Nick Piggin).  But since
 * bit_spin_lock doesn't actually set that lock bit in a non-debug
 * uniprocessor kernel, we should avoid setting it here too.
 */
#define PAGE_CGROUP_LOCK_BIT 	0x0
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
#define PAGE_CGROUP_LOCK 	(1 << PAGE_CGROUP_LOCK_BIT)
#else
#define PAGE_CGROUP_LOCK	0x0
#endif

/*
 * A page_cgroup page is associated with every page descriptor. The
 * page_cgroup helps us identify information about the cgroup
 */
struct page_cgroup {
	struct list_head lru;		/* per cgroup LRU list */
	struct page *page;
	struct mem_cgroup *mem_cgroup;
	int ref_cnt;			/* cached, mapped, migrating */
	int flags;
};
#define PAGE_CGROUP_FLAG_CACHE	(0x1)	/* charged as cache */
#define PAGE_CGROUP_FLAG_ACTIVE (0x2)	/* page is active in this cgroup */

static int page_cgroup_nid(struct page_cgroup *pc)
{
	return page_to_nid(pc->page);
}

static enum zone_type page_cgroup_zid(struct page_cgroup *pc)
{
	return page_zonenum(pc->page);
}

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_MAPPED,
};

/*
 * Always modified under the lru lock, so there is no need for
 * preempt_disable().
 */
static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags,
					bool charge)
{
	int val = charge ? 1 : -1;
	struct mem_cgroup_stat *stat = &mem->stat;

	VM_BUG_ON(!irqs_disabled());
	if (flags & PAGE_CGROUP_FLAG_CACHE)
		__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_CACHE, val);
	else
		__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_RSS, val);
}

static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
{
	return &mem->info.nodeinfo[nid]->zoneinfo[zid];
}

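/* Find the per-zone info for the zone that pc->page belongs to. */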
static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct page_cgroup *pc)
{
	struct mem_cgroup *mem = pc->mem_cgroup;
	int nid = page_cgroup_nid(pc);
	int zid = page_cgroup_zid(pc);

	return mem_cgroup_zoneinfo(mem, nid, zid);
}

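/* Sum the given zone statistic over every zone of every online node. */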
static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
					enum mem_cgroup_zstat_index idx)
{
	int nid, zid;
	struct mem_cgroup_per_zone *mz;
	u64 total = 0;

	for_each_online_node(nid)
		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
			mz = mem_cgroup_zoneinfo(mem, nid, zid);
			total += MEM_CGROUP_ZSTAT(mz, idx);
		}
	return total;
}

static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
	return container_of(cgroup_subsys_state(cont,
				mem_cgroup_subsys_id), struct mem_cgroup,
				css);
}

static struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
				struct mem_cgroup, css);
}

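/* Attach a new mm to the given task's memory cgroup, taking a css reference. */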
void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p)
{
	struct mem_cgroup *mem;

	mem = mem_cgroup_from_task(p);
	css_get(&mem->css);
	mm->mem_cgroup = mem;
}

void mm_free_cgroup(struct mm_struct *mm)
{
	css_put(&mm->mem_cgroup->css);
}

static inline int page_cgroup_locked(struct page *page)
{
	return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)
{
	VM_BUG_ON(!page_cgroup_locked(page));
	page->page_cgroup = ((unsigned long)pc | PAGE_CGROUP_LOCK);
}

struct page_cgroup *page_get_page_cgroup(struct page *page)
{
	return (struct page_cgroup *) (page->page_cgroup & ~PAGE_CGROUP_LOCK);
}

static void lock_page_cgroup(struct page *page)
{
	bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static int try_lock_page_cgroup(struct page *page)
{
	return bit_spin_trylock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static void unlock_page_cgroup(struct page *page)
{
	bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

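/* The list helpers below must be called with mz->lru_lock held. */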
static void __mem_cgroup_remove_list(struct page_cgroup *pc)
{
	int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);

	if (from)
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
	else
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;

	mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, false);
	list_del_init(&pc->lru);
}

static void __mem_cgroup_add_list(struct page_cgroup *pc)
{
	int to = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);

	if (!to) {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
		list_add(&pc->lru, &mz->inactive_list);
	} else {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
		list_add(&pc->lru, &mz->active_list);
	}
	mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, true);
}

static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
{
	int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);

	if (from)
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
	else
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;

	if (active) {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
		pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
		list_move(&pc->lru, &mz->active_list);
	} else {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
		pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
		list_move(&pc->lru, &mz->inactive_list);
	}
}

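/* Return non-zero if @task's mm belongs to @mem. */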
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
{
	int ret;

	task_lock(task);
	ret = task->mm && mm_match_cgroup(task->mm, mem);
	task_unlock(task);
	return ret;
}

/*
 * This routine assumes that the appropriate zone's lru lock is already held
 */
void mem_cgroup_move_lists(struct page *page, bool active)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;
	unsigned long flags;

	/*
	 * We cannot lock_page_cgroup while holding zone's lru_lock,
	 * because other holders of lock_page_cgroup can be interrupted
	 * with an attempt to rotate_reclaimable_page.  But we cannot
	 * safely get to page_cgroup without it, so just try_lock it:
	 * mem_cgroup_isolate_pages allows for page left on wrong list.
	 */
	if (!try_lock_page_cgroup(page))
		return;

	pc = page_get_page_cgroup(page);
	if (pc) {
		mz = page_cgroup_zoneinfo(pc);
		spin_lock_irqsave(&mz->lru_lock, flags);
		__mem_cgroup_move_lists(pc, active);
		spin_unlock_irqrestore(&mz->lru_lock, flags);
	}
	unlock_page_cgroup(page);
}

/*
 * Calculate the mapped_ratio under the memory controller. This will be
 * used in vmscan.c for determining whether we have to reclaim mapped pages.
 */
int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
{
	long total, rss;

	/*
	 * usage is recorded in bytes. But, here, we assume the number of
	 * physical pages can be represented by "long" on any arch.
	 */
	total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
	rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
	return (int)((rss * 100L) / total);
}

/*
 * This function is called from vmscan.c, in the page reclaim loop where
 * the balance between the active and inactive lists is calculated. For
 * memory controller page reclaim, we should use the mem_cgroup's imbalance
 * rather than the zone's global lru imbalance.
 */
long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
{
	unsigned long active, inactive;
	/* active and inactive are the number of pages. 'long' is ok.*/
	active = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_ACTIVE);
	inactive = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_INACTIVE);
	return (long) (active / (inactive + 1));
}

/*
 * prev_priority control... this will be used in the memory reclaim path.
 */
int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	return mem->prev_priority;
}

void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
{
	if (priority < mem->prev_priority)
		mem->prev_priority = priority;
}

void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
{
	mem->prev_priority = priority;
}

/*
 * Calculate # of pages to be scanned in this priority/zone.
 * See also vmscan.c
 *
 * priority starts from "DEF_PRIORITY" and is decremented in each loop.
 * (see include/linux/mmzone.h)
 */

long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
				   struct zone *zone, int priority)
{
	long nr_active;
	int nid = zone->zone_pgdat->node_id;
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);

	nr_active = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE);
	return (nr_active >> priority);
}

long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
					struct zone *zone, int priority)
{
	long nr_inactive;
	int nid = zone->zone_pgdat->node_id;
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);

	nr_inactive = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE);
	return (nr_inactive >> priority);
}

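/*
 * Scan up to nr_to_scan pages on this cgroup's per-zone LRU list and
 * move the isolated pages to @dst.  Pages found on the wrong list for
 * their PG_active state are rotated to the right list instead.
 */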
unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active)
{
	unsigned long nr_taken = 0;
	struct page *page;
	unsigned long scan;
	LIST_HEAD(pc_list);
	struct list_head *src;
	struct page_cgroup *pc, *tmp;
	int nid = z->zone_pgdat->node_id;
	int zid = zone_idx(z);
	struct mem_cgroup_per_zone *mz;

	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
	if (active)
		src = &mz->active_list;
	else
		src = &mz->inactive_list;

	spin_lock(&mz->lru_lock);
	scan = 0;
	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
		if (scan >= nr_to_scan)
			break;
		page = pc->page;

		if (unlikely(!PageLRU(page)))
			continue;

		if (PageActive(page) && !active) {
			__mem_cgroup_move_lists(pc, true);
			continue;
		}
		if (!PageActive(page) && active) {
			__mem_cgroup_move_lists(pc, false);
			continue;
		}

		scan++;
		list_move(&pc->lru, &pc_list);

		if (__isolate_lru_page(page, mode) == 0) {
			list_move(&page->lru, dst);
			nr_taken++;
		}
	}

	list_splice(&pc_list, src);
	spin_unlock(&mz->lru_lock);

	*scanned = scan;
	return nr_taken;
}

/*
 * Charge the memory controller for page usage.
 * Return
 * 0 if the charge was successful
 * < 0 if the cgroup is over its limit
 */
static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask, enum charge_type ctype)
{
	struct mem_cgroup *mem;
	struct page_cgroup *pc;
	unsigned long flags;
	unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_subsys.disabled)
		return 0;

	/*
	 * Should page_cgroup's go to their own slab?
	 * One could optimize the performance of the charging routine
	 * by saving a bit in the page_flags and using it as a lock
	 * to see if the cgroup page already has a page_cgroup associated
	 * with it
	 */
retry:
	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	/*
	 * The page_cgroup exists and
	 * the page has already been accounted.
	 */
	if (pc) {
		VM_BUG_ON(pc->page != page);
		VM_BUG_ON(pc->ref_cnt <= 0);

		pc->ref_cnt++;
		unlock_page_cgroup(page);
		goto done;
	}
	unlock_page_cgroup(page);

	pc = kzalloc(sizeof(struct page_cgroup), gfp_mask);
	if (pc == NULL)
		goto err;

	/*
	 * We always charge the cgroup the mm_struct belongs to.
	 * The mm_struct's mem_cgroup changes on task migration if the
	 * thread group leader migrates. It's possible that mm is not
	 * set, if so charge the init_mm (happens for pagecache usage).
	 */
	if (!mm)
		mm = &init_mm;

	rcu_read_lock();
	mem = rcu_dereference(mm->mem_cgroup);
	/*
	 * For every charge from the cgroup, increment reference count
	 */
	css_get(&mem->css);
	rcu_read_unlock();

	while (res_counter_charge(&mem->res, PAGE_SIZE)) {
		if (!(gfp_mask & __GFP_WAIT))
			goto out;

		if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
			continue;

		/*
		 * try_to_free_mem_cgroup_pages() might not give us a full
		 * picture of reclaim. Some pages are reclaimed and might be
		 * moved to swap cache or just unmapped from the cgroup.
		 * Check the limit again to see if the reclaim reduced the
		 * current usage of the cgroup before giving up
		 */
		if (res_counter_check_under_limit(&mem->res))
			continue;

		if (!nr_retries--) {
			mem_cgroup_out_of_memory(mem, gfp_mask);
			goto out;
		}
		congestion_wait(WRITE, HZ/10);
	}

	pc->ref_cnt = 1;
	pc->mem_cgroup = mem;
	pc->page = page;
	pc->flags = PAGE_CGROUP_FLAG_ACTIVE;
	if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
		pc->flags |= PAGE_CGROUP_FLAG_CACHE;

	lock_page_cgroup(page);
	if (page_get_page_cgroup(page)) {
		unlock_page_cgroup(page);
		/*
		 * Another charge has been added to this page already.
		 * We take lock_page_cgroup(page) again and read
		 * page->cgroup and increment the refcnt... just retrying is OK.
		 */
		res_counter_uncharge(&mem->res, PAGE_SIZE);
		css_put(&mem->css);
		kfree(pc);
		goto retry;
	}
	page_assign_page_cgroup(page, pc);

	mz = page_cgroup_zoneinfo(pc);
	spin_lock_irqsave(&mz->lru_lock, flags);
	__mem_cgroup_add_list(pc);
	spin_unlock_irqrestore(&mz->lru_lock, flags);

	unlock_page_cgroup(page);
done:
	return 0;
out:
	css_put(&mem->css);
	kfree(pc);
err:
	return -ENOMEM;
}

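/* Charge a page being mapped into an address space; accounted as rss. */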
int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
{
	return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_MAPPED);
}

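/* Charge a page cache page; if no mm is supplied, charge init_mm. */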
int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask)
{
	if (!mm)
		mm = &init_mm;
	return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_CACHE);
}

/*
 * Uncharging is always a welcome operation; we never complain, we
 * simply uncharge.
 */
void mem_cgroup_uncharge_page(struct page *page)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem;
	struct mem_cgroup_per_zone *mz;
	unsigned long flags;

	if (mem_cgroup_subsys.disabled)
		return;

	/*
	 * Check if our page_cgroup is valid
	 */
	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	if (!pc)
		goto unlock;

	VM_BUG_ON(pc->page != page);
	VM_BUG_ON(pc->ref_cnt <= 0);

	if (--(pc->ref_cnt) == 0) {
		mz = page_cgroup_zoneinfo(pc);
		spin_lock_irqsave(&mz->lru_lock, flags);
		__mem_cgroup_remove_list(pc);
		spin_unlock_irqrestore(&mz->lru_lock, flags);

		page_assign_page_cgroup(page, NULL);
		unlock_page_cgroup(page);

		mem = pc->mem_cgroup;
		res_counter_uncharge(&mem->res, PAGE_SIZE);
		css_put(&mem->css);

		kfree(pc);
		return;
	}

unlock:
	unlock_page_cgroup(page);
}

/*
 * Returns non-zero if a page (under migration) has a valid page_cgroup member.
 * The refcnt of the page_cgroup is incremented.
 */
int mem_cgroup_prepare_migration(struct page *page)
{
	struct page_cgroup *pc;

	if (mem_cgroup_subsys.disabled)
		return 0;

	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	if (pc)
		pc->ref_cnt++;
	unlock_page_cgroup(page);
	return pc != NULL;
}

void mem_cgroup_end_migration(struct page *page)
{
	mem_cgroup_uncharge_page(page);
}

/*
 * We know both *page* and *newpage* are now not-on-LRU and PG_locked.
 * And no race with uncharge() routines because page_cgroup for *page*
 * has one extra reference taken by mem_cgroup_prepare_migration.
 */
void mem_cgroup_page_migration(struct page *page, struct page *newpage)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;
	unsigned long flags;

	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	if (!pc) {
		unlock_page_cgroup(page);
		return;
	}

	mz = page_cgroup_zoneinfo(pc);
	spin_lock_irqsave(&mz->lru_lock, flags);
	__mem_cgroup_remove_list(pc);
	spin_unlock_irqrestore(&mz->lru_lock, flags);

	page_assign_page_cgroup(page, NULL);
	unlock_page_cgroup(page);

	pc->page = newpage;
	lock_page_cgroup(newpage);
	page_assign_page_cgroup(newpage, pc);

	mz = page_cgroup_zoneinfo(pc);
	spin_lock_irqsave(&mz->lru_lock, flags);
	__mem_cgroup_add_list(pc);
	spin_unlock_irqrestore(&mz->lru_lock, flags);

	unlock_page_cgroup(newpage);
}

/*
 * This routine traverses the page_cgroups on the given list and drops them all.
 * This routine ignores page_cgroup->ref_cnt.
 * *And* this routine doesn't reclaim the page itself, it just removes the
 * page_cgroup.
 */
#define FORCE_UNCHARGE_BATCH	(128)
static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
			    struct mem_cgroup_per_zone *mz,
			    int active)
{
	struct page_cgroup *pc;
	struct page *page;
	int count = FORCE_UNCHARGE_BATCH;
	unsigned long flags;
	struct list_head *list;

	if (active)
		list = &mz->active_list;
	else
		list = &mz->inactive_list;

	spin_lock_irqsave(&mz->lru_lock, flags);
	while (!list_empty(list)) {
		pc = list_entry(list->prev, struct page_cgroup, lru);
		page = pc->page;
		get_page(page);
		spin_unlock_irqrestore(&mz->lru_lock, flags);
		mem_cgroup_uncharge_page(page);
		put_page(page);
		if (--count <= 0) {
			count = FORCE_UNCHARGE_BATCH;
			cond_resched();
		}
		spin_lock_irqsave(&mz->lru_lock, flags);
	}
	spin_unlock_irqrestore(&mz->lru_lock, flags);
}

/*
 * Make the mem_cgroup's charge 0 if there is no task attached to it.
 * This enables deleting this mem_cgroup.
 */
static int mem_cgroup_force_empty(struct mem_cgroup *mem)
{
	int ret = -EBUSY;
	int node, zid;

	if (mem_cgroup_subsys.disabled)
		return 0;

	css_get(&mem->css);
	/*
	 * page reclaim code (kswapd etc..) will move pages between
	 * active_list <-> inactive_list while we don't take a lock.
	 * So, we have to loop here until all the lists are empty.
	 */
	while (mem->res.usage > 0) {
		if (atomic_read(&mem->css.cgroup->count) > 0)
			goto out;
		for_each_node_state(node, N_POSSIBLE)
			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
				struct mem_cgroup_per_zone *mz;
				mz = mem_cgroup_zoneinfo(mem, node, zid);
				/* drop all page_cgroup in active_list */
				mem_cgroup_force_empty_list(mem, mz, 1);
				/* drop all page_cgroup in inactive_list */
				mem_cgroup_force_empty_list(mem, mz, 0);
			}
	}
	ret = 0;
out:
	css_put(&mem->css);
	return ret;
}

static int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp)
{
	*tmp = memparse(buf, &buf);
	if (*buf != '\0')
		return -EINVAL;

	/*
	 * Round up the value to the closest page size
	 */
	*tmp = ((*tmp + PAGE_SIZE - 1) >> PAGE_SHIFT) << PAGE_SHIFT;
	return 0;
}

static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
{
	return res_counter_read_u64(&mem_cgroup_from_cont(cont)->res,
				    cft->private);
}

static ssize_t mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
				struct file *file, const char __user *userbuf,
				size_t nbytes, loff_t *ppos)
{
	return res_counter_write(&mem_cgroup_from_cont(cont)->res,
867 868
				cft->private, userbuf, nbytes, ppos,
				mem_cgroup_write_strategy);
B

871 872 873 874 875 876
static ssize_t mem_force_empty_write(struct cgroup *cont,
				struct cftype *cft, struct file *file,
				const char __user *userbuf,
				size_t nbytes, loff_t *ppos)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
877
	int ret = mem_cgroup_force_empty(mem);
878 879 880 881 882
	if (!ret)
		ret = nbytes;
	return ret;
}

883 884 885 886 887 888 889 890
static const struct mem_cgroup_stat_desc {
	const char *msg;
	u64 unit;
} mem_cgroup_stat_desc[] = {
	[MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
	[MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
};

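/* Show the accumulated statistics through the stat control file. */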
static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
				 struct cgroup_map_cb *cb)
893 894 895 896 897 898 899 900 901 902
{
	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
	struct mem_cgroup_stat *stat = &mem_cont->stat;
	int i;

	for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) {
		s64 val;

		val = mem_cgroup_read_stat(stat, i);
		val *= mem_cgroup_stat_desc[i].unit;
		cb->fill(cb, mem_cgroup_stat_desc[i].msg, val);
	}
	/* showing # of active pages */
	{
		unsigned long active, inactive;

		inactive = mem_cgroup_get_all_zonestat(mem_cont,
						MEM_CGROUP_ZSTAT_INACTIVE);
		active = mem_cgroup_get_all_zonestat(mem_cont,
						MEM_CGROUP_ZSTAT_ACTIVE);
913 914
		cb->fill(cb, "active", (active) * PAGE_SIZE);
		cb->fill(cb, "inactive", (inactive) * PAGE_SIZE);
915
	}
916 917 918
	return 0;
}

static struct cftype mem_cgroup_files[] = {
	{
		.name = "usage_in_bytes",
		.private = RES_USAGE,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "limit_in_bytes",
		.private = RES_LIMIT,
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "failcnt",
		.private = RES_FAILCNT,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "force_empty",
		.write = mem_force_empty_write,
	},
	{
		.name = "stat",
		.read_map = mem_control_stat_show,
	},
};

static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup_per_zone *mz;
	int zone, tmp = node;
	/*
	 * This routine is called against possible nodes.
	 * But it's a BUG to call kmalloc() against an offline node.
	 *
	 * TODO: this routine can waste much memory for nodes which will
	 *       never be onlined. It's better to use memory hotplug callback
	 *       function.
	 */
	if (!node_state(node, N_NORMAL_MEMORY))
		tmp = -1;
	pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
	if (!pn)
		return 1;

	mem->info.nodeinfo[node] = pn;
	memset(pn, 0, sizeof(*pn));

	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
		mz = &pn->zoneinfo[zone];
		INIT_LIST_HEAD(&mz->active_list);
		INIT_LIST_HEAD(&mz->inactive_list);
		spin_lock_init(&mz->lru_lock);
	}
	return 0;
}

static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
	kfree(mem->info.nodeinfo[node]);
}

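/* Create a mem_cgroup; the root cgroup uses the static init_mem_cgroup. */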
static struct cgroup_subsys_state *
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct mem_cgroup *mem;
	int node;

	if (unlikely((cont->parent) == NULL)) {
		mem = &init_mem_cgroup;
		init_mm.mem_cgroup = mem;
	} else
		mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL);

	if (mem == NULL)
		return ERR_PTR(-ENOMEM);

	res_counter_init(&mem->res);

	memset(&mem->info, 0, sizeof(mem->info));

	for_each_node_state(node, N_POSSIBLE)
		if (alloc_mem_cgroup_per_zone_info(mem, node))
			goto free_out;

	return &mem->css;
free_out:
	for_each_node_state(node, N_POSSIBLE)
		free_mem_cgroup_per_zone_info(mem, node);
	if (cont->parent != NULL)
		kfree(mem);
	return ERR_PTR(-ENOMEM);
}

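/* cgroup pre_destroy callback: empty the group's charges before destruction. */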
static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
					struct cgroup *cont)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
	mem_cgroup_force_empty(mem);
}

static void mem_cgroup_destroy(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	int node;
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);

	for_each_node_state(node, N_POSSIBLE)
		free_mem_cgroup_per_zone_info(mem, node);

	kfree(mem_cgroup_from_cont(cont));
}

static int mem_cgroup_populate(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	if (mem_cgroup_subsys.disabled)
		return 0;
	return cgroup_add_files(cont, ss, mem_cgroup_files,
					ARRAY_SIZE(mem_cgroup_files));
}

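/* cgroup attach callback: switch mm->mem_cgroup when a group leader migrates. */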
static void mem_cgroup_move_task(struct cgroup_subsys *ss,
				struct cgroup *cont,
				struct cgroup *old_cont,
				struct task_struct *p)
{
	struct mm_struct *mm;
	struct mem_cgroup *mem, *old_mem;

	if (mem_cgroup_subsys.disabled)
		return;

	mm = get_task_mm(p);
	if (mm == NULL)
		return;

	mem = mem_cgroup_from_cont(cont);
	old_mem = mem_cgroup_from_cont(old_cont);

	if (mem == old_mem)
		goto out;

	/*
	 * Only thread group leaders are allowed to migrate; the mm_struct is
	 * in effect owned by the leader.
	 */
	if (!thread_group_leader(p))
		goto out;

	css_get(&mem->css);
	rcu_assign_pointer(mm->mem_cgroup, mem);
	css_put(&old_mem->css);

out:
	mmput(mm);
}

struct cgroup_subsys mem_cgroup_subsys = {
	.name = "memory",
	.subsys_id = mem_cgroup_subsys_id,
	.create = mem_cgroup_create,
	.pre_destroy = mem_cgroup_pre_destroy,
	.destroy = mem_cgroup_destroy,
	.populate = mem_cgroup_populate,
	.attach = mem_cgroup_move_task,
	.early_init = 0,
};