/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#include <asm/uaccess.h>

struct cgroup_subsys mem_cgroup_subsys __read_mostly;
static struct kmem_cache *page_cgroup_cache __read_mostly;
#define MEM_CGROUP_RECLAIM_RETRIES	5

/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * Total usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE, 	   /* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,	   /* # of pages charged as rss */
	MEM_CGROUP_STAT_PGPGIN_COUNT,	/* # of pages paged in */
	MEM_CGROUP_STAT_PGPGOUT_COUNT,	/* # of pages paged out */

	MEM_CGROUP_STAT_NSTATS,
};

struct mem_cgroup_stat_cpu {
	s64 count[MEM_CGROUP_STAT_NSTATS];
} ____cacheline_aligned_in_smp;

struct mem_cgroup_stat {
	struct mem_cgroup_stat_cpu cpustat[NR_CPUS];
};

/*
 * Accounting happens with irqs disabled, so there is no need to
 * increment the preempt count.
 */
static void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat *stat,
		enum mem_cgroup_stat_index idx, int val)
{
	int cpu = smp_processor_id();
	stat->cpustat[cpu].count[idx] += val;
}

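/* Sum one statistics counter over all possible CPUs. */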
static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
		enum mem_cgroup_stat_index idx)
{
	int cpu;
	s64 ret = 0;
	for_each_possible_cpu(cpu)
		ret += stat->cpustat[cpu].count[idx];
	return ret;
}

/*
 * per-zone information in memory controller.
 */

enum mem_cgroup_zstat_index {
	MEM_CGROUP_ZSTAT_ACTIVE,
	MEM_CGROUP_ZSTAT_INACTIVE,

	NR_MEM_CGROUP_ZSTAT,
};

struct mem_cgroup_per_zone {
	/*
	 * spin_lock to protect the per cgroup LRU
	 */
	spinlock_t		lru_lock;
	struct list_head	active_list;
	struct list_head	inactive_list;
	unsigned long count[NR_MEM_CGROUP_ZSTAT];
};
/* Macro for accessing counter */
#define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

struct mem_cgroup_lru_info {
	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
};

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. We could even add a low water mark, such that
 * no reclaim occurs from a cgroup below its low water mark; this is
 * a feature that may be implemented in the future.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;
	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
	 */
	struct mem_cgroup_lru_info info;

	int	prev_priority;	/* for recording reclaim priority */
	/*
	 * statistics.
	 */
	struct mem_cgroup_stat stat;
};
static struct mem_cgroup init_mem_cgroup;

/*
 * We use the lower bit of the page->page_cgroup pointer as a bit spin
 * lock.  We need to ensure that page->page_cgroup is at least two
 * byte aligned (based on comments from Nick Piggin).  But since
 * bit_spin_lock doesn't actually set that lock bit in a non-debug
 * uniprocessor kernel, we should avoid setting it here too.
 */
#define PAGE_CGROUP_LOCK_BIT 	0x0
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
#define PAGE_CGROUP_LOCK 	(1 << PAGE_CGROUP_LOCK_BIT)
#else
#define PAGE_CGROUP_LOCK	0x0
#endif

/*
 * A page_cgroup is associated with every page descriptor. The
 * page_cgroup helps us identify which cgroup a page is charged to.
 */
struct page_cgroup {
	struct list_head lru;		/* per cgroup LRU list */
	struct page *page;
	struct mem_cgroup *mem_cgroup;
	int flags;
};
#define PAGE_CGROUP_FLAG_CACHE	(0x1)	/* charged as cache */
#define PAGE_CGROUP_FLAG_ACTIVE (0x2)	/* page is active in this cgroup */

static int page_cgroup_nid(struct page_cgroup *pc)
{
	return page_to_nid(pc->page);
}

static enum zone_type page_cgroup_zid(struct page_cgroup *pc)
{
	return page_zonenum(pc->page);
}

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_MAPPED,
	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
};

/*
 * Always modified under the lru lock, so there is no need to preempt_disable().
 */
static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags,
					bool charge)
{
	int val = (charge)? 1 : -1;
	struct mem_cgroup_stat *stat = &mem->stat;

	VM_BUG_ON(!irqs_disabled());
	if (flags & PAGE_CGROUP_FLAG_CACHE)
		__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_CACHE, val);
	else
		__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_RSS, val);

	if (charge)
		__mem_cgroup_stat_add_safe(stat,
				MEM_CGROUP_STAT_PGPGIN_COUNT, 1);
	else
		__mem_cgroup_stat_add_safe(stat,
				MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
}

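/* Return the per-zone accounting info of @mem for the given node and zone. */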
static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
{
	return &mem->info.nodeinfo[nid]->zoneinfo[zid];
}

static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct page_cgroup *pc)
{
	struct mem_cgroup *mem = pc->mem_cgroup;
	int nid = page_cgroup_nid(pc);
	int zid = page_cgroup_zid(pc);

	return mem_cgroup_zoneinfo(mem, nid, zid);
}

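/* Sum a per-zone LRU statistic over every online node and zone. */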
static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
					enum mem_cgroup_zstat_index idx)
{
	int nid, zid;
	struct mem_cgroup_per_zone *mz;
	u64 total = 0;

	for_each_online_node(nid)
		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
			mz = mem_cgroup_zoneinfo(mem, nid, zid);
			total += MEM_CGROUP_ZSTAT(mz, idx);
		}
	return total;
}

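/* Return the mem_cgroup bound to the given cgroup. */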
static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
	return container_of(cgroup_subsys_state(cont,
				mem_cgroup_subsys_id), struct mem_cgroup,
				css);
}

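/* Return the mem_cgroup that the given task is charged to. */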
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
				struct mem_cgroup, css);
}

static inline int page_cgroup_locked(struct page *page)
{
	return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)
{
	VM_BUG_ON(!page_cgroup_locked(page));
	page->page_cgroup = ((unsigned long)pc | PAGE_CGROUP_LOCK);
}

struct page_cgroup *page_get_page_cgroup(struct page *page)
{
	return (struct page_cgroup *) (page->page_cgroup & ~PAGE_CGROUP_LOCK);
}

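/*
 * The following helpers (un)lock a page's page_cgroup pointer using the
 * PAGE_CGROUP_LOCK_BIT bit spinlock embedded in the pointer itself.
 */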
static void lock_page_cgroup(struct page *page)
{
	bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static int try_lock_page_cgroup(struct page *page)
{
	return bit_spin_trylock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static void unlock_page_cgroup(struct page *page)
{
	bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

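/* Unlink a page_cgroup from its per-zone LRU list. Caller holds mz->lru_lock. */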
static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz,
			struct page_cgroup *pc)
{
	int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;

	if (from)
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
	else
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;

	mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, false);
	list_del(&pc->lru);
}

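/* Add a page_cgroup to the matching LRU list. Caller holds mz->lru_lock. */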
static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz,
				struct page_cgroup *pc)
{
	int to = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;

	if (!to) {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
		list_add(&pc->lru, &mz->inactive_list);
	} else {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
		list_add(&pc->lru, &mz->active_list);
	}
	mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, true);
}

static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
{
	int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);

	if (from)
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
	else
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;

	if (active) {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
		pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
		list_move(&pc->lru, &mz->active_list);
	} else {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
		pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
		list_move(&pc->lru, &mz->inactive_list);
	}
}

int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
{
	int ret;

	task_lock(task);
	ret = task->mm && mm_match_cgroup(task->mm, mem);
	task_unlock(task);
	return ret;
}

/*
 * This routine assumes that the appropriate zone's lru lock is already held
 */
void mem_cgroup_move_lists(struct page *page, bool active)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;
	unsigned long flags;

	if (mem_cgroup_subsys.disabled)
		return;

	/*
	 * We cannot lock_page_cgroup while holding zone's lru_lock,
	 * because other holders of lock_page_cgroup can be interrupted
	 * with an attempt to rotate_reclaimable_page.  But we cannot
	 * safely get to page_cgroup without it, so just try_lock it:
	 * mem_cgroup_isolate_pages allows for page left on wrong list.
	 */
	if (!try_lock_page_cgroup(page))
		return;

	pc = page_get_page_cgroup(page);
	if (pc) {
		mz = page_cgroup_zoneinfo(pc);
		spin_lock_irqsave(&mz->lru_lock, flags);
		__mem_cgroup_move_lists(pc, active);
		spin_unlock_irqrestore(&mz->lru_lock, flags);
	}
	unlock_page_cgroup(page);
}

/*
 * Calculate the mapped_ratio under the memory controller. This will be
 * used in vmscan.c to determine whether we have to reclaim mapped pages.
 */
int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
{
	long total, rss;

	/*
	 * usage is recorded in bytes. But, here, we assume the number of
	 * physical pages can be represented by "long" on any arch.
	 */
	total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
	rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
	return (int)((rss * 100L) / total);
}

/*
 * This function is called from the page reclaim loop in vmscan.c, where
 * the balance between the active and inactive lists is calculated. For
 * memory controller page reclaim, we should use the mem_cgroup's imbalance
 * rather than the zone's global lru imbalance.
 */
long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
{
	unsigned long active, inactive;
	/* active and inactive are the number of pages. 'long' is ok.*/
	active = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_ACTIVE);
	inactive = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_INACTIVE);
	return (long) (active / (inactive + 1));
}

/*
 * prev_priority control... this will be used in the memory reclaim path.
 */
int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	return mem->prev_priority;
}

void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
{
	if (priority < mem->prev_priority)
		mem->prev_priority = priority;
}

void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
{
	mem->prev_priority = priority;
}

/*
 * Calculate # of pages to be scanned in this priority/zone.
 * See also vmscan.c
 *
 * priority starts from "DEF_PRIORITY" and is decremented in each loop.
 * (see include/linux/mmzone.h)
 */

long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
				   struct zone *zone, int priority)
{
	long nr_active;
	int nid = zone->zone_pgdat->node_id;
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);

	nr_active = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE);
	return (nr_active >> priority);
}

long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
					struct zone *zone, int priority)
{
	long nr_inactive;
	int nid = zone->zone_pgdat->node_id;
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);

	nr_inactive = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE);
	return (nr_inactive >> priority);
}

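/*
 * Isolate up to nr_to_scan pages from this cgroup's per-zone LRU onto
 * @dst; the memory controller's counterpart of isolate_lru_pages().
 */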
unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active)
{
	unsigned long nr_taken = 0;
	struct page *page;
	unsigned long scan;
	LIST_HEAD(pc_list);
	struct list_head *src;
	struct page_cgroup *pc, *tmp;
	int nid = z->zone_pgdat->node_id;
	int zid = zone_idx(z);
	struct mem_cgroup_per_zone *mz;

	BUG_ON(!mem_cont);
	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
	if (active)
		src = &mz->active_list;
	else
		src = &mz->inactive_list;

	spin_lock(&mz->lru_lock);
	scan = 0;
	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
		if (scan >= nr_to_scan)
			break;
		page = pc->page;

		if (unlikely(!PageLRU(page)))
			continue;

		if (PageActive(page) && !active) {
			__mem_cgroup_move_lists(pc, true);
			continue;
		}
		if (!PageActive(page) && active) {
			__mem_cgroup_move_lists(pc, false);
			continue;
		}

		scan++;
		list_move(&pc->lru, &pc_list);

		if (__isolate_lru_page(page, mode) == 0) {
			list_move(&page->lru, dst);
			nr_taken++;
		}
	}

	list_splice(&pc_list, src);
	spin_unlock(&mz->lru_lock);

	*scanned = scan;
	return nr_taken;
}

/*
 * Charge the memory controller for page usage.
 * Return
 * 0 if the charge was successful
 * < 0 if the cgroup is over its limit
 */
static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask, enum charge_type ctype,
				struct mem_cgroup *memcg)
{
	struct mem_cgroup *mem;
	struct page_cgroup *pc;
	unsigned long flags;
	unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct mem_cgroup_per_zone *mz;

	pc = kmem_cache_alloc(page_cgroup_cache, gfp_mask);
	if (unlikely(pc == NULL))
		goto err;

	/*
	 * We always charge the cgroup the mm_struct belongs to.
	 * The mm_struct's mem_cgroup changes on task migration if the
	 * thread group leader migrates. It's possible that mm is not
	 * set, if so charge the init_mm (happens for pagecache usage).
	 */
	if (likely(!memcg)) {
		rcu_read_lock();
		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
		if (unlikely(!mem)) {
			rcu_read_unlock();
			kmem_cache_free(page_cgroup_cache, pc);
			return 0;
		}
		/*
		 * For every charge from the cgroup, increment reference count
		 */
		css_get(&mem->css);
		rcu_read_unlock();
	} else {
		mem = memcg;
		css_get(&memcg->css);
	}

	while (res_counter_charge(&mem->res, PAGE_SIZE)) {
		if (!(gfp_mask & __GFP_WAIT))
			goto out;

		if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
			continue;

		/*
		 * try_to_free_mem_cgroup_pages() might not give us a full
		 * picture of reclaim. Some pages are reclaimed and might be
		 * moved to swap cache or just unmapped from the cgroup.
		 * Check the limit again to see if the reclaim reduced the
		 * current usage of the cgroup before giving up
		 */
		if (res_counter_check_under_limit(&mem->res))
			continue;

		if (!nr_retries--) {
			mem_cgroup_out_of_memory(mem, gfp_mask);
			goto out;
		}
	}

	pc->mem_cgroup = mem;
	pc->page = page;
	/*
	 * If a page is accounted as a page cache, insert to inactive list.
	 * If anon, insert to active list.
	 */
	if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
		pc->flags = PAGE_CGROUP_FLAG_CACHE;
	else
		pc->flags = PAGE_CGROUP_FLAG_ACTIVE;

	lock_page_cgroup(page);
	if (unlikely(page_get_page_cgroup(page))) {
		unlock_page_cgroup(page);
		res_counter_uncharge(&mem->res, PAGE_SIZE);
		css_put(&mem->css);
		kmem_cache_free(page_cgroup_cache, pc);
		goto done;
	}
	page_assign_page_cgroup(page, pc);

	mz = page_cgroup_zoneinfo(pc);
	spin_lock_irqsave(&mz->lru_lock, flags);
	__mem_cgroup_add_list(mz, pc);
	spin_unlock_irqrestore(&mz->lru_lock, flags);

	unlock_page_cgroup(page);
done:
	return 0;
out:
	css_put(&mem->css);
	kmem_cache_free(page_cgroup_cache, pc);
err:
	return -ENOMEM;
}

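/* Charge a newly mapped anonymous page to the cgroup of the mm's owner. */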
int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
{
	if (mem_cgroup_subsys.disabled)
		return 0;

	/*
	 * If already mapped, we don't have to account.
	 * If page cache, page->mapping has an address_space.
	 * But page->mapping may hold a stale anon_vma pointer;
	 * detect that with a PageAnon() check. A newly mapped anonymous
	 * page's page->mapping is NULL.
	 */
	if (page_mapped(page) || (page->mapping && !PageAnon(page)))
		return 0;
	if (unlikely(!mm))
		mm = &init_mm;
	return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
}

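/* Charge a page-cache page; a NULL mm is charged to init_mm. */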
int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask)
{
	if (mem_cgroup_subsys.disabled)
		return 0;

	/*
	 * Corner case handling. This is usually called from
	 * add_to_page_cache(). But some filesystems (shmem) precharge
	 * the page before calling it and call add_to_page_cache() with
	 * GFP_NOWAIT.
	 *
	 * In the GFP_NOWAIT case the page may already be charged (see
	 * shmem.c), so check for that here and avoid charging it twice.
	 * (This works, but at a slightly larger cost.)
	 */
	if (!(gfp_mask & __GFP_WAIT)) {
		struct page_cgroup *pc;

		lock_page_cgroup(page);
		pc = page_get_page_cgroup(page);
		if (pc) {
			VM_BUG_ON(pc->page != page);
			VM_BUG_ON(!pc->mem_cgroup);
			unlock_page_cgroup(page);
			return 0;
		}
		unlock_page_cgroup(page);
	}

	if (unlikely(!mm))
		mm = &init_mm;

	return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
}

/*
 * uncharge if !page_mapped(page)
 */
static void
__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem;
	struct mem_cgroup_per_zone *mz;
	unsigned long flags;

	if (mem_cgroup_subsys.disabled)
		return;

	/*
	 * Check if our page_cgroup is valid
	 */
	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	if (unlikely(!pc))
		goto unlock;

	VM_BUG_ON(pc->page != page);

	if ((ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
	    && ((pc->flags & PAGE_CGROUP_FLAG_CACHE)
		|| page_mapped(page)))
		goto unlock;

	mz = page_cgroup_zoneinfo(pc);
	spin_lock_irqsave(&mz->lru_lock, flags);
	__mem_cgroup_remove_list(mz, pc);
	spin_unlock_irqrestore(&mz->lru_lock, flags);

	page_assign_page_cgroup(page, NULL);
	unlock_page_cgroup(page);

	mem = pc->mem_cgroup;
	res_counter_uncharge(&mem->res, PAGE_SIZE);
	css_put(&mem->css);

	kmem_cache_free(page_cgroup_cache, pc);
	return;
unlock:
	unlock_page_cgroup(page);
}

void mem_cgroup_uncharge_page(struct page *page)
{
	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
}

void mem_cgroup_uncharge_cache_page(struct page *page)
{
	VM_BUG_ON(page_mapped(page));
	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
}

/*
 * Before starting migration, account against the new page.
 */
int mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem = NULL;
	enum charge_type ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
	int ret = 0;

	if (mem_cgroup_subsys.disabled)
		return 0;

	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	if (pc) {
		mem = pc->mem_cgroup;
		css_get(&mem->css);
		if (pc->flags & PAGE_CGROUP_FLAG_CACHE)
			ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
	}
	unlock_page_cgroup(page);
	if (mem) {
		ret = mem_cgroup_charge_common(newpage, NULL, GFP_KERNEL,
			ctype, mem);
		css_put(&mem->css);
	}
	return ret;
}

/* Remove the redundant charge if migration failed. */
void mem_cgroup_end_migration(struct page *newpage)
{
	/*
	 * On success, page->mapping is not NULL.
	 * Special rollback care is necessary when
	 * 1. migration fails (newpage->mapping is cleared in this case), or
	 * 2. the newpage was moved but not remapped again because the task
	 *    exited and the newpage is obsolete. In this case, the new page
	 *    may be a swapcache. So, we always call mem_cgroup_uncharge_page()
	 *    to avoid a mess; the page_cgroup is removed if it is unnecessary.
	 *    File cache pages are still on the radix tree, so don't worry
	 *    about them.
	 */
	if (!newpage->mapping)
		__mem_cgroup_uncharge_common(newpage,
					 MEM_CGROUP_CHARGE_TYPE_FORCE);
	else if (PageAnon(newpage))
		mem_cgroup_uncharge_page(newpage);
}

/*
 * A call to try to shrink memory usage under the specified resource
 * controller. This is typically used to reclaim shmem pages, reducing the
 * side effects of shmem page allocation on whichever mem_cgroup the pages
 * are charged to.
 */
int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
{
	struct mem_cgroup *mem;
	int progress = 0;
	int retry = MEM_CGROUP_RECLAIM_RETRIES;

	if (mem_cgroup_subsys.disabled)
		return 0;
	if (!mm)
		return 0;

	rcu_read_lock();
	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (unlikely(!mem)) {
		rcu_read_unlock();
		return 0;
	}
	css_get(&mem->css);
	rcu_read_unlock();

	do {
		progress = try_to_free_mem_cgroup_pages(mem, gfp_mask);
		progress += res_counter_check_under_limit(&mem->res);
	} while (!progress && --retry);

	css_put(&mem->css);
	if (!retry)
		return -ENOMEM;
	return 0;
}

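/*
 * Set a new limit, reclaiming pages from the group until the new limit
 * can be set, the retries are exhausted or a signal is pending.
 */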
int mem_cgroup_resize_limit(struct mem_cgroup *memcg, unsigned long long val)
{
	int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
	int progress;
	int ret = 0;

	while (res_counter_set_limit(&memcg->res, val)) {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		if (!retry_count) {
			ret = -EBUSY;
			break;
		}
		progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL);
		if (!progress)
			retry_count--;
	}
	return ret;
}

/*
 * This routine traverses the page_cgroups on the given list and drops them
 * all. Note that it doesn't reclaim the pages themselves; it only removes
 * the page_cgroups.
 */
#define FORCE_UNCHARGE_BATCH	(128)
static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
			    struct mem_cgroup_per_zone *mz,
			    int active)
{
	struct page_cgroup *pc;
	struct page *page;
	int count = FORCE_UNCHARGE_BATCH;
	unsigned long flags;
	struct list_head *list;

	if (active)
		list = &mz->active_list;
	else
		list = &mz->inactive_list;

	spin_lock_irqsave(&mz->lru_lock, flags);
	while (!list_empty(list)) {
		pc = list_entry(list->prev, struct page_cgroup, lru);
		page = pc->page;
		get_page(page);
		spin_unlock_irqrestore(&mz->lru_lock, flags);
		/*
		 * Check if this page is on LRU. !LRU page can be found
		 * if it's under page migration.
		 */
		if (PageLRU(page)) {
			__mem_cgroup_uncharge_common(page,
					MEM_CGROUP_CHARGE_TYPE_FORCE);
			put_page(page);
			if (--count <= 0) {
				count = FORCE_UNCHARGE_BATCH;
				cond_resched();
			}
		} else
			cond_resched();
		spin_lock_irqsave(&mz->lru_lock, flags);
	}
	spin_unlock_irqrestore(&mz->lru_lock, flags);
}

/*
 * Make the mem_cgroup's charge 0 if there is no task using it.
 * This enables deleting this mem_cgroup.
 */
static int mem_cgroup_force_empty(struct mem_cgroup *mem)
{
	int ret = -EBUSY;
	int node, zid;

	css_get(&mem->css);
	/*
	 * Page reclaim code (kswapd etc.) may move pages between
	 * active_list <-> inactive_list while we don't take a lock.
	 * So, we have to loop here until all the lists are empty.
	 */
	while (mem->res.usage > 0) {
		if (atomic_read(&mem->css.cgroup->count) > 0)
			goto out;
		for_each_node_state(node, N_POSSIBLE)
			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
				struct mem_cgroup_per_zone *mz;
				mz = mem_cgroup_zoneinfo(mem, node, zid);
				/* drop all page_cgroup in active_list */
				mem_cgroup_force_empty_list(mem, mz, 1);
				/* drop all page_cgroup in inactive_list */
				mem_cgroup_force_empty_list(mem, mz, 0);
			}
	}
	ret = 0;
out:
	css_put(&mem->css);
	return ret;
}

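/* Read handler shared by the usage, limit, max_usage and failcnt files. */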
static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
{
	return res_counter_read_u64(&mem_cgroup_from_cont(cont)->res,
				    cft->private);
}
/*
 * The user of this function is...
 * RES_LIMIT.
 */
static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
			    const char *buffer)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
	unsigned long long val;
	int ret;

	switch (cft->private) {
	case RES_LIMIT:
		/* This function does all necessary parse...reuse it */
		ret = res_counter_memparse_write_strategy(buffer, &val);
		if (!ret)
			ret = mem_cgroup_resize_limit(memcg, val);
		break;
	default:
		ret = -EINVAL; /* should be BUG() ? */
		break;
	}
	return ret;
}

static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
{
	struct mem_cgroup *mem;

	mem = mem_cgroup_from_cont(cont);
	switch (event) {
	case RES_MAX_USAGE:
		res_counter_reset_max(&mem->res);
		break;
	case RES_FAILCNT:
		res_counter_reset_failcnt(&mem->res);
		break;
	}
	return 0;
}

static int mem_force_empty_write(struct cgroup *cont, unsigned int event)
{
	return mem_cgroup_force_empty(mem_cgroup_from_cont(cont));
}

static const struct mem_cgroup_stat_desc {
	const char *msg;
	u64 unit;
} mem_cgroup_stat_desc[] = {
	[MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
	[MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
	[MEM_CGROUP_STAT_PGPGIN_COUNT] = {"pgpgin", 1, },
	[MEM_CGROUP_STAT_PGPGOUT_COUNT] = {"pgpgout", 1, },
};

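/* Emit the accumulated statistics through the cgroup "stat" map file. */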
static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
	struct mem_cgroup_stat *stat = &mem_cont->stat;
	int i;

	for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) {
		s64 val;

		val = mem_cgroup_read_stat(stat, i);
		val *= mem_cgroup_stat_desc[i].unit;
		cb->fill(cb, mem_cgroup_stat_desc[i].msg, val);
	}
	/* showing # of active pages */
	{
		unsigned long active, inactive;

		inactive = mem_cgroup_get_all_zonestat(mem_cont,
						MEM_CGROUP_ZSTAT_INACTIVE);
		active = mem_cgroup_get_all_zonestat(mem_cont,
						MEM_CGROUP_ZSTAT_ACTIVE);
		cb->fill(cb, "active", (active) * PAGE_SIZE);
		cb->fill(cb, "inactive", (inactive) * PAGE_SIZE);
	}
	return 0;
}

static struct cftype mem_cgroup_files[] = {
	{
		.name = "usage_in_bytes",
		.private = RES_USAGE,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "max_usage_in_bytes",
		.private = RES_MAX_USAGE,
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "limit_in_bytes",
		.private = RES_LIMIT,
		.write_string = mem_cgroup_write,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "failcnt",
		.private = RES_FAILCNT,
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "force_empty",
		.trigger = mem_force_empty_write,
	},
	{
		.name = "stat",
		.read_map = mem_control_stat_show,
	},
};

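/* Allocate and initialize the per-zone LRU lists and lock for one node. */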
static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup_per_zone *mz;
	int zone, tmp = node;
	/*
	 * This routine is called against possible nodes.
	 * But it's a BUG to call kmalloc() against an offline node.
	 *
	 * TODO: this routine can waste much memory for nodes which will
	 *       never be onlined. It's better to use memory hotplug callback
	 *       function.
	 */
	if (!node_state(node, N_NORMAL_MEMORY))
		tmp = -1;
	pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
	if (!pn)
		return 1;

	mem->info.nodeinfo[node] = pn;
	memset(pn, 0, sizeof(*pn));

	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
		mz = &pn->zoneinfo[zone];
		INIT_LIST_HEAD(&mz->active_list);
		INIT_LIST_HEAD(&mz->inactive_list);
		spin_lock_init(&mz->lru_lock);
	}
	return 0;
}

static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
	kfree(mem->info.nodeinfo[node]);
}

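/*
 * With many possible CPUs the per-cpu statistics make struct mem_cgroup
 * larger than a page, so fall back to vmalloc() in that case.
 */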
static struct mem_cgroup *mem_cgroup_alloc(void)
{
	struct mem_cgroup *mem;

	if (sizeof(*mem) < PAGE_SIZE)
		mem = kmalloc(sizeof(*mem), GFP_KERNEL);
	else
		mem = vmalloc(sizeof(*mem));

	if (mem)
		memset(mem, 0, sizeof(*mem));
	return mem;
}

static void mem_cgroup_free(struct mem_cgroup *mem)
{
	if (sizeof(*mem) < PAGE_SIZE)
		kfree(mem);
	else
		vfree(mem);
}

static struct cgroup_subsys_state *
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct mem_cgroup *mem;
	int node;

	if (unlikely((cont->parent) == NULL)) {
		mem = &init_mem_cgroup;
		page_cgroup_cache = KMEM_CACHE(page_cgroup, SLAB_PANIC);
	} else {
		mem = mem_cgroup_alloc();
		if (!mem)
			return ERR_PTR(-ENOMEM);
	}

	res_counter_init(&mem->res);

	for_each_node_state(node, N_POSSIBLE)
		if (alloc_mem_cgroup_per_zone_info(mem, node))
			goto free_out;

	return &mem->css;
free_out:
	for_each_node_state(node, N_POSSIBLE)
		free_mem_cgroup_per_zone_info(mem, node);
	if (cont->parent != NULL)
		mem_cgroup_free(mem);
	return ERR_PTR(-ENOMEM);
}

static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
					struct cgroup *cont)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
	mem_cgroup_force_empty(mem);
}

static void mem_cgroup_destroy(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	int node;
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);

	for_each_node_state(node, N_POSSIBLE)
		free_mem_cgroup_per_zone_info(mem, node);

	mem_cgroup_free(mem_cgroup_from_cont(cont));
}

static int mem_cgroup_populate(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	return cgroup_add_files(cont, ss, mem_cgroup_files,
					ARRAY_SIZE(mem_cgroup_files));
}

static void mem_cgroup_move_task(struct cgroup_subsys *ss,
				struct cgroup *cont,
				struct cgroup *old_cont,
				struct task_struct *p)
{
	struct mm_struct *mm;
	struct mem_cgroup *mem, *old_mem;

	mm = get_task_mm(p);
	if (mm == NULL)
		return;

	mem = mem_cgroup_from_cont(cont);
	old_mem = mem_cgroup_from_cont(old_cont);

	/*
	 * Only thread group leaders are allowed to migrate, the mm_struct is
	 * in effect owned by the leader
	 */
	if (!thread_group_leader(p))
		goto out;

out:
	mmput(mm);
}

struct cgroup_subsys mem_cgroup_subsys = {
	.name = "memory",
	.subsys_id = mem_cgroup_subsys_id,
	.create = mem_cgroup_create,
	.pre_destroy = mem_cgroup_pre_destroy,
	.destroy = mem_cgroup_destroy,
	.populate = mem_cgroup_populate,
	.attach = mem_cgroup_move_task,
	.early_init = 0,
};