/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>

#include <asm/uaccess.h>

struct cgroup_subsys mem_cgroup_subsys;
static const int MEM_CGROUP_RECLAIM_RETRIES = 5;
static struct kmem_cache *page_cgroup_cache;

/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE, 	   /* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,	   /* # of pages charged as rss */

	MEM_CGROUP_STAT_NSTATS,
};

struct mem_cgroup_stat_cpu {
	s64 count[MEM_CGROUP_STAT_NSTATS];
} ____cacheline_aligned_in_smp;

struct mem_cgroup_stat {
	struct mem_cgroup_stat_cpu cpustat[NR_CPUS];
};

/*
 * Accounting happens with irqs disabled, so there is no need to
 * disable preemption around smp_processor_id().
 */
static void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat *stat,
		enum mem_cgroup_stat_index idx, int val)
{
	int cpu = smp_processor_id();
	stat->cpustat[cpu].count[idx] += val;
}

static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
		enum mem_cgroup_stat_index idx)
{
	int cpu;
	s64 ret = 0;
	for_each_possible_cpu(cpu)
		ret += stat->cpustat[cpu].count[idx];
	return ret;
}

/*
 * per-zone information in memory controller.
 */

enum mem_cgroup_zstat_index {
	MEM_CGROUP_ZSTAT_ACTIVE,
	MEM_CGROUP_ZSTAT_INACTIVE,

	NR_MEM_CGROUP_ZSTAT,
};

struct mem_cgroup_per_zone {
	/*
	 * spin_lock to protect the per cgroup LRU
	 */
	spinlock_t		lru_lock;
	struct list_head	active_list;
	struct list_head	inactive_list;
	unsigned long count[NR_MEM_CGROUP_ZSTAT];
};
/* Macro for accessing counter */
#define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

struct mem_cgroup_lru_info {
	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
};

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;
	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
	 */
	struct mem_cgroup_lru_info info;

	int	prev_priority;	/* for recording reclaim priority */
	/*
	 * statistics.
	 */
	struct mem_cgroup_stat stat;
};

static struct mem_cgroup init_mem_cgroup;

/*
 * We use the lower bit of the page->page_cgroup pointer as a bit spin
 * lock.  We need to ensure that page->page_cgroup is at least two
 * byte aligned (based on comments from Nick Piggin).  But since
 * bit_spin_lock doesn't actually set that lock bit in a non-debug
 * uniprocessor kernel, we should avoid setting it here too.
 */
#define PAGE_CGROUP_LOCK_BIT 	0x0
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
#define PAGE_CGROUP_LOCK 	(1 << PAGE_CGROUP_LOCK_BIT)
#else
#define PAGE_CGROUP_LOCK	0x0
#endif
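
/*
 * For illustration: with the lock folded into the low bit, readers recover
 * the page_cgroup pointer by masking that bit off, e.g.
 *
 *	pc = (struct page_cgroup *)(page->page_cgroup & ~PAGE_CGROUP_LOCK);
 *
 * which is exactly what page_get_page_cgroup() below does.
 */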

/*
 * A page_cgroup page is associated with every page descriptor. The
 * page_cgroup helps us identify information about the cgroup
 */
struct page_cgroup {
	struct list_head lru;		/* per cgroup LRU list */
	struct page *page;
	struct mem_cgroup *mem_cgroup;
	int ref_cnt;			/* cached, mapped, migrating */
	int flags;
};
#define PAGE_CGROUP_FLAG_CACHE	(0x1)	/* charged as cache */
#define PAGE_CGROUP_FLAG_ACTIVE (0x2)	/* page is active in this cgroup */

static int page_cgroup_nid(struct page_cgroup *pc)
{
	return page_to_nid(pc->page);
}

static enum zone_type page_cgroup_zid(struct page_cgroup *pc)
{
	return page_zonenum(pc->page);
}

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_MAPPED,
};

/*
 * Always modified under the lru lock, so there is no need to
 * preempt_disable().
 */
static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags,
					bool charge)
{
	int val = (charge)? 1 : -1;
	struct mem_cgroup_stat *stat = &mem->stat;

	VM_BUG_ON(!irqs_disabled());
	if (flags & PAGE_CGROUP_FLAG_CACHE)
		__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_CACHE, val);
	else
		__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_RSS, val);
}

static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
{
	return &mem->info.nodeinfo[nid]->zoneinfo[zid];
}

static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct page_cgroup *pc)
{
	struct mem_cgroup *mem = pc->mem_cgroup;
	int nid = page_cgroup_nid(pc);
	int zid = page_cgroup_zid(pc);

	return mem_cgroup_zoneinfo(mem, nid, zid);
}

static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
					enum mem_cgroup_zstat_index idx)
{
	int nid, zid;
	struct mem_cgroup_per_zone *mz;
	u64 total = 0;

	for_each_online_node(nid)
		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
			mz = mem_cgroup_zoneinfo(mem, nid, zid);
			total += MEM_CGROUP_ZSTAT(mz, idx);
		}
	return total;
}

static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
	return container_of(cgroup_subsys_state(cont,
				mem_cgroup_subsys_id), struct mem_cgroup,
				css);
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
				struct mem_cgroup, css);
}

static inline int page_cgroup_locked(struct page *page)
{
	return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)
{
	VM_BUG_ON(!page_cgroup_locked(page));
	page->page_cgroup = ((unsigned long)pc | PAGE_CGROUP_LOCK);
}

struct page_cgroup *page_get_page_cgroup(struct page *page)
{
	return (struct page_cgroup *) (page->page_cgroup & ~PAGE_CGROUP_LOCK);
}

static void lock_page_cgroup(struct page *page)
{
	bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static int try_lock_page_cgroup(struct page *page)
{
	return bit_spin_trylock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static void unlock_page_cgroup(struct page *page)
{
	bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz,
			struct page_cgroup *pc)
{
	int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;

	if (from)
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
	else
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;

	mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, false);
	list_del_init(&pc->lru);
}

static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz,
				struct page_cgroup *pc)
{
	int to = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;

	if (!to) {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
		list_add(&pc->lru, &mz->inactive_list);
	} else {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
		list_add(&pc->lru, &mz->active_list);
	}
	mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, true);
}

static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
{
	int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);

	if (from)
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
	else
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;

	if (active) {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
		pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
		list_move(&pc->lru, &mz->active_list);
	} else {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
		pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
		list_move(&pc->lru, &mz->inactive_list);
	}
}

int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
{
	int ret;

	task_lock(task);
	ret = task->mm && mm_match_cgroup(task->mm, mem);
	task_unlock(task);
	return ret;
}

/*
 * This routine assumes that the appropriate zone's lru lock is already held
 */
void mem_cgroup_move_lists(struct page *page, bool active)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;
	unsigned long flags;

	/*
	 * We cannot lock_page_cgroup while holding zone's lru_lock,
	 * because other holders of lock_page_cgroup can be interrupted
	 * with an attempt to rotate_reclaimable_page.  But we cannot
	 * safely get to page_cgroup without it, so just try_lock it:
	 * mem_cgroup_isolate_pages allows for page left on wrong list.
	 */
	if (!try_lock_page_cgroup(page))
		return;

	pc = page_get_page_cgroup(page);
	if (pc) {
		mz = page_cgroup_zoneinfo(pc);
		spin_lock_irqsave(&mz->lru_lock, flags);
		__mem_cgroup_move_lists(pc, active);
		spin_unlock_irqrestore(&mz->lru_lock, flags);
	}
	unlock_page_cgroup(page);
}

/*
 * Calculate mapped_ratio under the memory controller. This will be used in
 * vmscan.c for determining whether we have to reclaim mapped pages.
 */
int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
{
	long total, rss;

	/*
	 * usage is recorded in bytes. But, here, we assume the number of
	 * physical pages can be represented by "long" on any arch.
	 */
	total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
	rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
	return (int)((rss * 100L) / total);
}
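
/*
 * Worked example (illustrative numbers): usage covering 1024 pages of
 * which 256 are rss gives total = 1025 and a mapped ratio of
 * (256 * 100) / 1025 = 24 after integer truncation. The "+ 1" above also
 * keeps the divisor non-zero when usage is 0.
 */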

/*
 * This function is called from vmscan.c, in the page reclaim loop where the
 * balance between the active and inactive lists is calculated. For memory
 * controller page reclaim, we should use the mem_cgroup's imbalance rather
 * than the zone's global lru imbalance.
 */
long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
{
	unsigned long active, inactive;
	/* active and inactive are the number of pages. 'long' is ok.*/
	active = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_ACTIVE);
	inactive = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_INACTIVE);
	return (long) (active / (inactive + 1));
}
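
/*
 * Worked example (illustrative numbers): 3000 active pages against 999
 * inactive pages yields 3000 / (999 + 1) = 3. The "+ 1" guards against a
 * zero divisor when the inactive list is empty.
 */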

/*
 * prev_priority control...this will be used in memory reclaim path.
 */
int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	return mem->prev_priority;
}

void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
{
	if (priority < mem->prev_priority)
		mem->prev_priority = priority;
}

void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
{
	mem->prev_priority = priority;
}

/*
 * Calculate # of pages to be scanned in this priority/zone.
 * See also vmscan.c
 *
 * priority starts from "DEF_PRIORITY" and decremented in each loop.
 * (see include/linux/mmzone.h)
 */

long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
				   struct zone *zone, int priority)
{
	long nr_active;
	int nid = zone->zone_pgdat->node_id;
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);

	nr_active = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE);
	return (nr_active >> priority);
}

long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
					struct zone *zone, int priority)
{
	long nr_inactive;
	int nid = zone->zone_pgdat->node_id;
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);

	nr_inactive = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE);
	return (nr_inactive >> priority);
}
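
/*
 * Worked example (illustrative numbers): at DEF_PRIORITY (12), a zone
 * holding 8192 active pages for this cgroup contributes 8192 >> 12 = 2
 * pages to the scan target; as reclaim retries at lower priority values
 * the shift shrinks and the scan target grows.
 */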

unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active)
{
	unsigned long nr_taken = 0;
	struct page *page;
	unsigned long scan;
	LIST_HEAD(pc_list);
	struct list_head *src;
	struct page_cgroup *pc, *tmp;
	int nid = z->zone_pgdat->node_id;
	int zid = zone_idx(z);
	struct mem_cgroup_per_zone *mz;

	BUG_ON(!mem_cont);
	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
	if (active)
		src = &mz->active_list;
	else
		src = &mz->inactive_list;

	spin_lock(&mz->lru_lock);
	scan = 0;
	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
		if (scan >= nr_to_scan)
			break;
		page = pc->page;

		if (unlikely(!PageLRU(page)))
			continue;

		if (PageActive(page) && !active) {
			__mem_cgroup_move_lists(pc, true);
			continue;
		}
		if (!PageActive(page) && active) {
			__mem_cgroup_move_lists(pc, false);
			continue;
		}

		scan++;
		list_move(&pc->lru, &pc_list);

		if (__isolate_lru_page(page, mode) == 0) {
			list_move(&page->lru, dst);
			nr_taken++;
		}
	}

	list_splice(&pc_list, src);
	spin_unlock(&mz->lru_lock);

	*scanned = scan;
	return nr_taken;
}

/*
 * Charge the memory controller for page usage.
 * Return
 * 0 if the charge was successful
 * < 0 if the cgroup is over its limit
 */
static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask, enum charge_type ctype)
{
	struct mem_cgroup *mem;
	struct page_cgroup *pc;
	unsigned long flags;
	unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_subsys.disabled)
		return 0;

	/*
	 * Should page_cgroups go to their own slab?
	 * One could optimize the performance of the charging routine
	 * by saving a bit in the page_flags and using it as a lock
	 * to see if the cgroup page already has a page_cgroup associated
	 * with it
	 */
retry:
	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	/*
	 * The page_cgroup exists and
	 * the page has already been accounted.
	 */
	if (pc) {
		VM_BUG_ON(pc->page != page);
		VM_BUG_ON(pc->ref_cnt <= 0);

		pc->ref_cnt++;
		unlock_page_cgroup(page);
		goto done;
	}
	unlock_page_cgroup(page);

	pc = kmem_cache_zalloc(page_cgroup_cache, gfp_mask);
	if (pc == NULL)
		goto err;

	/*
	 * We always charge the cgroup the mm_struct belongs to.
	 * The mm_struct's mem_cgroup changes on task migration if the
	 * thread group leader migrates. It's possible that mm is not
	 * set, if so charge the init_mm (happens for pagecache usage).
	 */
	if (!mm)
		mm = &init_mm;

	rcu_read_lock();
	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
	/*
	 * For every charge from the cgroup, increment reference count
	 */
	css_get(&mem->css);
	rcu_read_unlock();

	while (res_counter_charge(&mem->res, PAGE_SIZE)) {
		if (!(gfp_mask & __GFP_WAIT))
			goto out;

		if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
			continue;

		/*
		 * try_to_free_mem_cgroup_pages() might not give us a full
		 * picture of reclaim. Some pages are reclaimed and might be
		 * moved to swap cache or just unmapped from the cgroup.
		 * Check the limit again to see if the reclaim reduced the
		 * current usage of the cgroup before giving up
		 */
		if (res_counter_check_under_limit(&mem->res))
			continue;

		if (!nr_retries--) {
			mem_cgroup_out_of_memory(mem, gfp_mask);
			goto out;
		}
		congestion_wait(WRITE, HZ/10);
	}

	pc->ref_cnt = 1;
	pc->mem_cgroup = mem;
	pc->page = page;
	pc->flags = PAGE_CGROUP_FLAG_ACTIVE;
	if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
		pc->flags |= PAGE_CGROUP_FLAG_CACHE;

	lock_page_cgroup(page);
	if (page_get_page_cgroup(page)) {
		unlock_page_cgroup(page);
		/*
		 * Another charge has been added to this page already.
		 * We take lock_page_cgroup(page) again and read
		 * page->cgroup, increment refcnt.... just retry is OK.
		 */
		res_counter_uncharge(&mem->res, PAGE_SIZE);
		css_put(&mem->css);
		kmem_cache_free(page_cgroup_cache, pc);
		goto retry;
	}
	page_assign_page_cgroup(page, pc);

	mz = page_cgroup_zoneinfo(pc);
	spin_lock_irqsave(&mz->lru_lock, flags);
	__mem_cgroup_add_list(mz, pc);
	spin_unlock_irqrestore(&mz->lru_lock, flags);

	unlock_page_cgroup(page);
done:
	return 0;
out:
	css_put(&mem->css);
	kmem_cache_free(page_cgroup_cache, pc);
err:
	return -ENOMEM;
}

int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
{
	return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_MAPPED);
}

int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask)
{
	if (!mm)
		mm = &init_mm;
	return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_CACHE);
}
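
/*
 * Typical call sequence (illustrative; see the pagecache code for the
 * authoritative path): add_to_page_cache() charges a new page with
 * mem_cgroup_cache_charge() before inserting it, and calls
 * mem_cgroup_uncharge_page() if the insertion fails.
 */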

/*
 * Uncharging is always a welcome operation; we never complain, simply
 * uncharge.
 */
void mem_cgroup_uncharge_page(struct page *page)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem;
	struct mem_cgroup_per_zone *mz;
	unsigned long flags;

	if (mem_cgroup_subsys.disabled)
		return;

	/*
	 * Check if our page_cgroup is valid
	 */
	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	if (!pc)
		goto unlock;

	VM_BUG_ON(pc->page != page);
	VM_BUG_ON(pc->ref_cnt <= 0);

	if (--(pc->ref_cnt) == 0) {
		mz = page_cgroup_zoneinfo(pc);
		spin_lock_irqsave(&mz->lru_lock, flags);
		__mem_cgroup_remove_list(mz, pc);
		spin_unlock_irqrestore(&mz->lru_lock, flags);

		page_assign_page_cgroup(page, NULL);
		unlock_page_cgroup(page);

		mem = pc->mem_cgroup;
		res_counter_uncharge(&mem->res, PAGE_SIZE);
		css_put(&mem->css);

		kmem_cache_free(page_cgroup_cache, pc);
		return;
	}

unlock:
	unlock_page_cgroup(page);
}

/*
 * Returns non-zero if a page (under migration) has valid page_cgroup member.
 * Refcnt of page_cgroup is incremented.
 */
int mem_cgroup_prepare_migration(struct page *page)
{
	struct page_cgroup *pc;

	if (mem_cgroup_subsys.disabled)
		return 0;

	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	if (pc)
		pc->ref_cnt++;
	unlock_page_cgroup(page);
	return pc != NULL;
}

void mem_cgroup_end_migration(struct page *page)
{
	mem_cgroup_uncharge_page(page);
}

/*
 * We know both *page* and *newpage* are now not-on-LRU and PG_locked.
 * And no race with uncharge() routines because page_cgroup for *page*
 * has extra one reference by mem_cgroup_prepare_migration.
 */
void mem_cgroup_page_migration(struct page *page, struct page *newpage)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;
	unsigned long flags;

	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	if (!pc) {
		unlock_page_cgroup(page);
		return;
	}

	mz = page_cgroup_zoneinfo(pc);
	spin_lock_irqsave(&mz->lru_lock, flags);
	__mem_cgroup_remove_list(mz, pc);
	spin_unlock_irqrestore(&mz->lru_lock, flags);

	page_assign_page_cgroup(page, NULL);
	unlock_page_cgroup(page);

	pc->page = newpage;
	lock_page_cgroup(newpage);
	page_assign_page_cgroup(newpage, pc);

	mz = page_cgroup_zoneinfo(pc);
	spin_lock_irqsave(&mz->lru_lock, flags);
	__mem_cgroup_add_list(mz, pc);
	spin_unlock_irqrestore(&mz->lru_lock, flags);

	unlock_page_cgroup(newpage);
}

/*
 * This routine traverses the page_cgroups on the given list and drops them
 * all. It ignores page_cgroup->ref_cnt.
 * *And* this routine doesn't reclaim the page itself, it just removes the
 * page_cgroup.
 */
#define FORCE_UNCHARGE_BATCH	(128)
static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
			    struct mem_cgroup_per_zone *mz,
			    int active)
{
	struct page_cgroup *pc;
	struct page *page;
	int count = FORCE_UNCHARGE_BATCH;
	unsigned long flags;
	struct list_head *list;

	if (active)
		list = &mz->active_list;
	else
		list = &mz->inactive_list;

	spin_lock_irqsave(&mz->lru_lock, flags);
	while (!list_empty(list)) {
		pc = list_entry(list->prev, struct page_cgroup, lru);
		page = pc->page;
		get_page(page);
		spin_unlock_irqrestore(&mz->lru_lock, flags);
		mem_cgroup_uncharge_page(page);
		put_page(page);
		if (--count <= 0) {
			count = FORCE_UNCHARGE_BATCH;
			cond_resched();
		}
		spin_lock_irqsave(&mz->lru_lock, flags);
	}
	spin_unlock_irqrestore(&mz->lru_lock, flags);
}

/*
 * Make the mem_cgroup's charge 0 if there is no task.
 * This enables deleting this mem_cgroup.
 */
static int mem_cgroup_force_empty(struct mem_cgroup *mem)
{
	int ret = -EBUSY;
	int node, zid;

	if (mem_cgroup_subsys.disabled)
		return 0;

	css_get(&mem->css);
	/*
	 * page reclaim code (kswapd etc..) will move pages between
	 * active_list <-> inactive_list while we don't take a lock.
	 * So, we have to loop here until all the lists are empty.
	 */
	while (mem->res.usage > 0) {
		if (atomic_read(&mem->css.cgroup->count) > 0)
			goto out;
		for_each_node_state(node, N_POSSIBLE)
			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
				struct mem_cgroup_per_zone *mz;
				mz = mem_cgroup_zoneinfo(mem, node, zid);
				/* drop all page_cgroup in active_list */
				mem_cgroup_force_empty_list(mem, mz, 1);
				/* drop all page_cgroup in inactive_list */
				mem_cgroup_force_empty_list(mem, mz, 0);
			}
	}
	ret = 0;
out:
	css_put(&mem->css);
	return ret;
}

static int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp)
{
	*tmp = memparse(buf, &buf);
	if (*buf != '\0')
		return -EINVAL;

	/*
	 * Round up the value to the closest page size
	 */
	*tmp = ((*tmp + PAGE_SIZE - 1) >> PAGE_SHIFT) << PAGE_SHIFT;
	return 0;
}
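
/*
 * Worked example (illustrative): writing "4M" stores 4194304, which is
 * already page aligned; writing "4100" rounds up to the next page
 * boundary, 8192 with 4KB pages, since partial pages cannot be charged.
 */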

static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
{
	return res_counter_read_u64(&mem_cgroup_from_cont(cont)->res,
				    cft->private);
}

static ssize_t mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
				struct file *file, const char __user *userbuf,
				size_t nbytes, loff_t *ppos)
{
	return res_counter_write(&mem_cgroup_from_cont(cont)->res,
				cft->private, userbuf, nbytes, ppos,
				mem_cgroup_write_strategy);
}

static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
{
	struct mem_cgroup *mem;

	mem = mem_cgroup_from_cont(cont);
	switch (event) {
	case RES_MAX_USAGE:
		res_counter_reset_max(&mem->res);
		break;
	case RES_FAILCNT:
		res_counter_reset_failcnt(&mem->res);
		break;
	}
	return 0;
}

static int mem_force_empty_write(struct cgroup *cont, unsigned int event)
{
	return mem_cgroup_force_empty(mem_cgroup_from_cont(cont));
}

static const struct mem_cgroup_stat_desc {
	const char *msg;
	u64 unit;
} mem_cgroup_stat_desc[] = {
	[MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
	[MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
};

static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
	struct mem_cgroup_stat *stat = &mem_cont->stat;
	int i;

	for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) {
		s64 val;

		val = mem_cgroup_read_stat(stat, i);
		val *= mem_cgroup_stat_desc[i].unit;
		cb->fill(cb, mem_cgroup_stat_desc[i].msg, val);
	}
	/* showing # of active pages */
	{
		unsigned long active, inactive;

		inactive = mem_cgroup_get_all_zonestat(mem_cont,
						MEM_CGROUP_ZSTAT_INACTIVE);
		active = mem_cgroup_get_all_zonestat(mem_cont,
						MEM_CGROUP_ZSTAT_ACTIVE);
		cb->fill(cb, "active", (active) * PAGE_SIZE);
		cb->fill(cb, "inactive", (inactive) * PAGE_SIZE);
	}
	return 0;
}

static struct cftype mem_cgroup_files[] = {
	{
		.name = "usage_in_bytes",
		.private = RES_USAGE,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "max_usage_in_bytes",
		.private = RES_MAX_USAGE,
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "limit_in_bytes",
		.private = RES_LIMIT,
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "failcnt",
		.private = RES_FAILCNT,
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "force_empty",
		.trigger = mem_force_empty_write,
	},
	{
		.name = "stat",
		.read_map = mem_control_stat_show,
	},
};

static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup_per_zone *mz;
	int zone, tmp = node;
	/*
	 * This routine is called against possible nodes.
	 * But it's a BUG to call kmalloc() against an offline node.
	 *
	 * TODO: this routine can waste much memory for nodes which will
	 *       never be onlined. It's better to use a memory hotplug callback
	 *       function.
	 */
	if (!node_state(node, N_NORMAL_MEMORY))
		tmp = -1;
	pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
	if (!pn)
		return 1;

	mem->info.nodeinfo[node] = pn;
	memset(pn, 0, sizeof(*pn));

	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
		mz = &pn->zoneinfo[zone];
		INIT_LIST_HEAD(&mz->active_list);
		INIT_LIST_HEAD(&mz->inactive_list);
		spin_lock_init(&mz->lru_lock);
	}
	return 0;
}

static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
	kfree(mem->info.nodeinfo[node]);
}

static struct cgroup_subsys_state *
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct mem_cgroup *mem;
	int node;

	if (unlikely((cont->parent) == NULL)) {
		mem = &init_mem_cgroup;
		page_cgroup_cache = KMEM_CACHE(page_cgroup, SLAB_PANIC);
	} else {
		mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL);
	}

	if (mem == NULL)
		return ERR_PTR(-ENOMEM);

	res_counter_init(&mem->res);

	memset(&mem->info, 0, sizeof(mem->info));

	for_each_node_state(node, N_POSSIBLE)
		if (alloc_mem_cgroup_per_zone_info(mem, node))
			goto free_out;

	return &mem->css;
free_out:
	for_each_node_state(node, N_POSSIBLE)
		free_mem_cgroup_per_zone_info(mem, node);
	if (cont->parent != NULL)
		kfree(mem);
	return ERR_PTR(-ENOMEM);
}

static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
					struct cgroup *cont)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
	mem_cgroup_force_empty(mem);
}

static void mem_cgroup_destroy(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	int node;
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);

	for_each_node_state(node, N_POSSIBLE)
		free_mem_cgroup_per_zone_info(mem, node);

	kfree(mem_cgroup_from_cont(cont));
}

static int mem_cgroup_populate(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	if (mem_cgroup_subsys.disabled)
		return 0;
	return cgroup_add_files(cont, ss, mem_cgroup_files,
					ARRAY_SIZE(mem_cgroup_files));
}

static void mem_cgroup_move_task(struct cgroup_subsys *ss,
				struct cgroup *cont,
				struct cgroup *old_cont,
				struct task_struct *p)
{
	struct mm_struct *mm;
	struct mem_cgroup *mem, *old_mem;

	if (mem_cgroup_subsys.disabled)
		return;

	mm = get_task_mm(p);
	if (mm == NULL)
		return;

	mem = mem_cgroup_from_cont(cont);
	old_mem = mem_cgroup_from_cont(old_cont);

	if (mem == old_mem)
		goto out;

	/*
	 * Only thread group leaders are allowed to migrate, the mm_struct is
	 * in effect owned by the leader
	 */
	if (!thread_group_leader(p))
		goto out;

out:
	mmput(mm);
}

struct cgroup_subsys mem_cgroup_subsys = {
	.name = "memory",
	.subsys_id = mem_cgroup_subsys_id,
	.create = mem_cgroup_create,
	.pre_destroy = mem_cgroup_pre_destroy,
	.destroy = mem_cgroup_destroy,
	.populate = mem_cgroup_populate,
	.attach = mem_cgroup_move_task,
	.early_init = 0,
};