/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#include <asm/uaccess.h>

struct cgroup_subsys mem_cgroup_subsys __read_mostly;
static struct kmem_cache *page_cgroup_cache __read_mostly;
#define MEM_CGROUP_RECLAIM_RETRIES	5

/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE, 	   /* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,	   /* # of pages charged as rss */
	MEM_CGROUP_STAT_PGPGIN_COUNT,	/* # of pages paged in */
	MEM_CGROUP_STAT_PGPGOUT_COUNT,	/* # of pages paged out */

	MEM_CGROUP_STAT_NSTATS,
};

struct mem_cgroup_stat_cpu {
	s64 count[MEM_CGROUP_STAT_NSTATS];
} ____cacheline_aligned_in_smp;

struct mem_cgroup_stat {
	struct mem_cgroup_stat_cpu cpustat[NR_CPUS];
};

/*
 * For accounting done with irqs disabled there is no need to bump the
 * preempt count.
 */
static void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat *stat,
		enum mem_cgroup_stat_index idx, int val)
{
	int cpu = smp_processor_id();
	stat->cpustat[cpu].count[idx] += val;
}

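/*
 * Sum a per-cpu statistics counter over all possible CPUs.
 */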
static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
		enum mem_cgroup_stat_index idx)
{
	int cpu;
	s64 ret = 0;
	for_each_possible_cpu(cpu)
		ret += stat->cpustat[cpu].count[idx];
	return ret;
}

/*
 * per-zone information in memory controller.
 */

enum mem_cgroup_zstat_index {
	MEM_CGROUP_ZSTAT_ACTIVE,
	MEM_CGROUP_ZSTAT_INACTIVE,

	NR_MEM_CGROUP_ZSTAT,
};

struct mem_cgroup_per_zone {
	/*
	 * spin_lock to protect the per cgroup LRU
	 */
	spinlock_t		lru_lock;
	struct list_head	active_list;
	struct list_head	inactive_list;
	unsigned long count[NR_MEM_CGROUP_ZSTAT];
};
/* Macro for accessing counter */
#define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

struct mem_cgroup_lru_info {
	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
};

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe we should even add a low water mark, such that
 * no reclaim occurs from a cgroup that is at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;
	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
	 */
	struct mem_cgroup_lru_info info;

	int	prev_priority;	/* for recording reclaim priority */
	/*
	 * statistics.
	 */
	struct mem_cgroup_stat stat;
};
static struct mem_cgroup init_mem_cgroup;

/*
 * We use the lower bit of the page->page_cgroup pointer as a bit spin
 * lock.  We need to ensure that page->page_cgroup is at least two
 * byte aligned (based on comments from Nick Piggin).  But since
 * bit_spin_lock doesn't actually set that lock bit in a non-debug
 * uniprocessor kernel, we should avoid setting it here too.
 */
#define PAGE_CGROUP_LOCK_BIT 	0x0
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
#define PAGE_CGROUP_LOCK 	(1 << PAGE_CGROUP_LOCK_BIT)
#else
#define PAGE_CGROUP_LOCK	0x0
#endif

/*
 * A page_cgroup page is associated with every page descriptor. The
 * page_cgroup helps us identify information about the cgroup
 */
struct page_cgroup {
	struct list_head lru;		/* per cgroup LRU list */
	struct page *page;
	struct mem_cgroup *mem_cgroup;
	int flags;
};
#define PAGE_CGROUP_FLAG_CACHE	(0x1)	/* charged as cache */
#define PAGE_CGROUP_FLAG_ACTIVE (0x2)	/* page is active in this cgroup */

static int page_cgroup_nid(struct page_cgroup *pc)
{
	return page_to_nid(pc->page);
}

static enum zone_type page_cgroup_zid(struct page_cgroup *pc)
{
	return page_zonenum(pc->page);
}

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_MAPPED,
	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
};

/*
 * Always modified under the lru lock, so there is no need to preempt_disable().
 */
static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags,
					bool charge)
{
	int val = (charge)? 1 : -1;
	struct mem_cgroup_stat *stat = &mem->stat;

	VM_BUG_ON(!irqs_disabled());
	if (flags & PAGE_CGROUP_FLAG_CACHE)
		__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_CACHE, val);
	else
		__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_RSS, val);

	if (charge)
		__mem_cgroup_stat_add_safe(stat,
				MEM_CGROUP_STAT_PGPGIN_COUNT, 1);
	else
		__mem_cgroup_stat_add_safe(stat,
				MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
}

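/*
 * Return the per-zone LRU bookkeeping of @mem for the given node and zone.
 */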
static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
{
	return &mem->info.nodeinfo[nid]->zoneinfo[zid];
}

219
static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct page_cgroup *pc)
{
	struct mem_cgroup *mem = pc->mem_cgroup;
	int nid = page_cgroup_nid(pc);
	int zid = page_cgroup_zid(pc);

	return mem_cgroup_zoneinfo(mem, nid, zid);
}

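/*
 * Sum one per-zone LRU statistic over all online nodes and zones.
 */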
static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
					enum mem_cgroup_zstat_index idx)
{
	int nid, zid;
	struct mem_cgroup_per_zone *mz;
	u64 total = 0;

	for_each_online_node(nid)
		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
			mz = mem_cgroup_zoneinfo(mem, nid, zid);
			total += MEM_CGROUP_ZSTAT(mz, idx);
		}
	return total;
}

static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
	return container_of(cgroup_subsys_state(cont,
				mem_cgroup_subsys_id), struct mem_cgroup,
				css);
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
				struct mem_cgroup, css);
}

static inline int page_cgroup_locked(struct page *page)
{
	return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)
{
	VM_BUG_ON(!page_cgroup_locked(page));
	page->page_cgroup = ((unsigned long)pc | PAGE_CGROUP_LOCK);
}

struct page_cgroup *page_get_page_cgroup(struct page *page)
{
	return (struct page_cgroup *) (page->page_cgroup & ~PAGE_CGROUP_LOCK);
}

static void lock_page_cgroup(struct page *page)
{
	bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static int try_lock_page_cgroup(struct page *page)
{
	return bit_spin_trylock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static void unlock_page_cgroup(struct page *page)
{
	bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

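/*
 * Unlink a page_cgroup from its per-zone LRU list and update the zone
 * statistics.  The caller must hold mz->lru_lock.
 */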
static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz,
			struct page_cgroup *pc)
{
	int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;

	if (from)
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
	else
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;

	mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, false);
	list_del(&pc->lru);
}

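/*
 * Add a page_cgroup to the active or inactive LRU list of its zone and
 * update the zone statistics.  The caller must hold mz->lru_lock.
 */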
static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz,
				struct page_cgroup *pc)
{
	int to = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;

	if (!to) {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
		list_add(&pc->lru, &mz->inactive_list);
	} else {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
		list_add(&pc->lru, &mz->active_list);
	}
	mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, true);
}

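/*
 * Move a page_cgroup between the active and inactive lists of its zone.
 * The caller must hold mz->lru_lock.
 */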
static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
{
	int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);

	if (from)
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
	else
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;

	if (active) {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
		pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
		list_move(&pc->lru, &mz->active_list);
	} else {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
		pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
		list_move(&pc->lru, &mz->inactive_list);
	}
}

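/*
 * Test whether @task's mm is charged to the memory cgroup @mem.
 */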
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
{
	int ret;

	task_lock(task);
	ret = task->mm && mm_match_cgroup(task->mm, mem);
	task_unlock(task);
	return ret;
}

/*
 * This routine assumes that the appropriate zone's lru lock is already held
 */
void mem_cgroup_move_lists(struct page *page, bool active)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;
	unsigned long flags;

	if (mem_cgroup_subsys.disabled)
		return;

	/*
	 * We cannot lock_page_cgroup while holding zone's lru_lock,
	 * because other holders of lock_page_cgroup can be interrupted
	 * with an attempt to rotate_reclaimable_page.  But we cannot
	 * safely get to page_cgroup without it, so just try_lock it:
	 * mem_cgroup_isolate_pages allows for page left on wrong list.
	 */
	if (!try_lock_page_cgroup(page))
		return;

	pc = page_get_page_cgroup(page);
	if (pc) {
		mz = page_cgroup_zoneinfo(pc);
		spin_lock_irqsave(&mz->lru_lock, flags);
		__mem_cgroup_move_lists(pc, active);
		spin_unlock_irqrestore(&mz->lru_lock, flags);
	}
	unlock_page_cgroup(page);
}

/*
 * Calculate the mapped_ratio under the memory controller. This will be used
 * in vmscan.c for determining whether we have to reclaim mapped pages.
 */
int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
{
	long total, rss;

	/*
	 * usage is recorded in bytes. But, here, we assume the number of
	 * physical pages can be represented by "long" on any arch.
	 */
	total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
	rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
	return (int)((rss * 100L) / total);
}

/*
 * This function is called from vmscan.c's page reclaiming loop, where the
 * balance between the active and inactive lists is calculated. For memory
 * controller page reclaiming, we should use the mem_cgroup's imbalance
 * rather than the zone's global LRU imbalance.
 */
long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
{
	unsigned long active, inactive;
	/* active and inactive are the number of pages. 'long' is ok.*/
	active = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_ACTIVE);
	inactive = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_INACTIVE);
	return (long) (active / (inactive + 1));
}

/*
 * prev_priority control...this will be used in memory reclaim path.
 */
int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	return mem->prev_priority;
}

void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
{
	if (priority < mem->prev_priority)
		mem->prev_priority = priority;
}

void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
{
	mem->prev_priority = priority;
}

/*
 * Calculate # of pages to be scanned in this priority/zone.
 * See also vmscan.c
 *
 * priority starts from "DEF_PRIORITY" and decremented in each loop.
 * (see include/linux/mmzone.h)
 */

long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
				   struct zone *zone, int priority)
{
	long nr_active;
	int nid = zone->zone_pgdat->node_id;
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);

	nr_active = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE);
	return (nr_active >> priority);
}

long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
					struct zone *zone, int priority)
{
	long nr_inactive;
	int nid = zone->zone_pgdat->node_id;
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);

	nr_inactive = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE);
	return (nr_inactive >> priority);
}

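/*
 * Isolate up to @nr_to_scan pages from this cgroup's per-zone LRU lists for
 * reclaim; the memory-cgroup analogue of the global LRU isolation done in
 * vmscan.c.
 */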
unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active)
{
	unsigned long nr_taken = 0;
	struct page *page;
	unsigned long scan;
	LIST_HEAD(pc_list);
	struct list_head *src;
	struct page_cgroup *pc, *tmp;
	int nid = z->zone_pgdat->node_id;
	int zid = zone_idx(z);
	struct mem_cgroup_per_zone *mz;

	BUG_ON(!mem_cont);
	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
	if (active)
		src = &mz->active_list;
	else
		src = &mz->inactive_list;

	spin_lock(&mz->lru_lock);
	scan = 0;
	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
		if (scan >= nr_to_scan)
			break;
		page = pc->page;

		if (unlikely(!PageLRU(page)))
			continue;

		if (PageActive(page) && !active) {
			__mem_cgroup_move_lists(pc, true);
			continue;
		}
		if (!PageActive(page) && active) {
			__mem_cgroup_move_lists(pc, false);
			continue;
		}

		scan++;
		list_move(&pc->lru, &pc_list);

		if (__isolate_lru_page(page, mode) == 0) {
			list_move(&page->lru, dst);
			nr_taken++;
		}
	}

	list_splice(&pc_list, src);
	spin_unlock(&mz->lru_lock);

	*scanned = scan;
	return nr_taken;
}

/*
 * Charge the memory controller for page usage.
 * Return
 * 0 if the charge was successful
 * < 0 if the cgroup is over its limit
 */
static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask, enum charge_type ctype,
				struct mem_cgroup *memcg)
{
	struct mem_cgroup *mem;
	struct page_cgroup *pc;
	unsigned long flags;
	unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct mem_cgroup_per_zone *mz;

	pc = kmem_cache_alloc(page_cgroup_cache, gfp_mask);
	if (unlikely(pc == NULL))
		goto err;

	/*
	 * We always charge the cgroup the mm_struct belongs to.
	 * The mm_struct's mem_cgroup changes on task migration if the
	 * thread group leader migrates. It's possible that mm is not
	 * set, if so charge the init_mm (happens for pagecache usage).
	 */
	if (likely(!memcg)) {
		rcu_read_lock();
		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
		/*
		 * For every charge from the cgroup, increment reference count
		 */
		css_get(&mem->css);
		rcu_read_unlock();
	} else {
		mem = memcg;
		css_get(&memcg->css);
	}

	while (res_counter_charge(&mem->res, PAGE_SIZE)) {
		if (!(gfp_mask & __GFP_WAIT))
			goto out;

		if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
			continue;

		/*
		 * try_to_free_mem_cgroup_pages() might not give us a full
		 * picture of reclaim. Some pages are reclaimed and might be
		 * moved to swap cache or just unmapped from the cgroup.
		 * Check the limit again to see if the reclaim reduced the
		 * current usage of the cgroup before giving up
		 */
		if (res_counter_check_under_limit(&mem->res))
			continue;

		if (!nr_retries--) {
			mem_cgroup_out_of_memory(mem, gfp_mask);
			goto out;
		}
	}

	pc->mem_cgroup = mem;
	pc->page = page;
	/*
	 * If a page is accounted as a page cache, insert to inactive list.
	 * If anon, insert to active list.
	 */
	if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
		pc->flags = PAGE_CGROUP_FLAG_CACHE;
	else
		pc->flags = PAGE_CGROUP_FLAG_ACTIVE;

	lock_page_cgroup(page);
	if (unlikely(page_get_page_cgroup(page))) {
		unlock_page_cgroup(page);
		res_counter_uncharge(&mem->res, PAGE_SIZE);
		css_put(&mem->css);
		kmem_cache_free(page_cgroup_cache, pc);
		goto done;
	}
	page_assign_page_cgroup(page, pc);

	mz = page_cgroup_zoneinfo(pc);
	spin_lock_irqsave(&mz->lru_lock, flags);
	__mem_cgroup_add_list(mz, pc);
	spin_unlock_irqrestore(&mz->lru_lock, flags);

	unlock_page_cgroup(page);
done:
	return 0;
out:
	css_put(&mem->css);
	kmem_cache_free(page_cgroup_cache, pc);
err:
	return -ENOMEM;
}

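/*
 * Charge a page being mapped into an address space; already-mapped pages and
 * page-cache pages are accounted elsewhere and are skipped here.
 */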
int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
{
	if (mem_cgroup_subsys.disabled)
		return 0;

	/*
	 * If already mapped, we don't have to account.
	 * If page cache, page->mapping has the address_space.
	 * But page->mapping may hold a stale anon_vma pointer; detect that
	 * with a PageAnon() check.  A newly mapped anonymous page's
	 * page->mapping is NULL.
	 */
	if (page_mapped(page) || (page->mapping && !PageAnon(page)))
		return 0;
	if (unlikely(!mm))
		mm = &init_mm;
	return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
}

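/*
 * Charge a page that is being added to the page cache.
 */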
int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask)
{
	if (mem_cgroup_subsys.disabled)
		return 0;

	/*
	 * Corner case handling. This is usually called from add_to_page_cache().
	 * But some filesystems (shmem) precharge the page before calling it
	 * and call add_to_page_cache() with GFP_NOWAIT.
	 *
	 * In the GFP_NOWAIT case, the page may have been pre-charged before
	 * calling add_to_page_cache() (see shmem.c).  Check for that here and
	 * avoid charging twice.  (It works but has to pay a bit larger cost.)
	 */
	if (!(gfp_mask & __GFP_WAIT)) {
		struct page_cgroup *pc;

		lock_page_cgroup(page);
		pc = page_get_page_cgroup(page);
		if (pc) {
			VM_BUG_ON(pc->page != page);
			VM_BUG_ON(!pc->mem_cgroup);
			unlock_page_cgroup(page);
			return 0;
		}
		unlock_page_cgroup(page);
	}

	if (unlikely(!mm))
		mm = &init_mm;

	return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
}

/*
 * uncharge if !page_mapped(page)
 */
static void
__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem;
	struct mem_cgroup_per_zone *mz;
	unsigned long flags;

	if (mem_cgroup_subsys.disabled)
		return;

	/*
	 * Check if our page_cgroup is valid
	 */
	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	if (unlikely(!pc))
		goto unlock;

	VM_BUG_ON(pc->page != page);

	if ((ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
	    && ((pc->flags & PAGE_CGROUP_FLAG_CACHE)
		|| page_mapped(page)))
		goto unlock;

	mz = page_cgroup_zoneinfo(pc);
	spin_lock_irqsave(&mz->lru_lock, flags);
	__mem_cgroup_remove_list(mz, pc);
	spin_unlock_irqrestore(&mz->lru_lock, flags);

	page_assign_page_cgroup(page, NULL);
	unlock_page_cgroup(page);

	mem = pc->mem_cgroup;
	res_counter_uncharge(&mem->res, PAGE_SIZE);
	css_put(&mem->css);

	kmem_cache_free(page_cgroup_cache, pc);
	return;
unlock:
	unlock_page_cgroup(page);
}

void mem_cgroup_uncharge_page(struct page *page)
{
	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
}

void mem_cgroup_uncharge_cache_page(struct page *page)
{
	VM_BUG_ON(page_mapped(page));
	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
}

/*
 * Before starting migration, account against new page.
 */
int mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem = NULL;
	enum charge_type ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
	int ret = 0;

	if (mem_cgroup_subsys.disabled)
		return 0;

	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	if (pc) {
		mem = pc->mem_cgroup;
		css_get(&mem->css);
		if (pc->flags & PAGE_CGROUP_FLAG_CACHE)
			ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
	}
	unlock_page_cgroup(page);
	if (mem) {
		ret = mem_cgroup_charge_common(newpage, NULL, GFP_KERNEL,
			ctype, mem);
		css_put(&mem->css);
	}
	return ret;
}

/* remove redundant charge if migration failed */
void mem_cgroup_end_migration(struct page *newpage)
{
	/*
	 * On success, page->mapping is not NULL.
	 * Special rollback care is necessary when
	 * 1. migration fails (newpage->mapping is cleared in this case), or
	 * 2. the newpage was moved but not remapped again because the task
	 *    exits and the newpage is obsolete. In this case, the new page
	 *    may be a swapcache, so we just call mem_cgroup_uncharge_page()
	 *    unconditionally to avoid a mess.  The page_cgroup will be removed
	 *    if it is unnecessary.  File cache pages are still on the
	 *    radix-tree; don't care about them.
	 */
	if (!newpage->mapping)
		__mem_cgroup_uncharge_common(newpage,
					 MEM_CGROUP_CHARGE_TYPE_FORCE);
	else if (PageAnon(newpage))
		mem_cgroup_uncharge_page(newpage);
}

/*
 * A call to try to shrink memory usage under specified resource controller.
 * This is typically used for page reclaiming for shmem for reducing side
 * effect of page allocation from shmem, which is used by some mem_cgroup.
 */
int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
{
	struct mem_cgroup *mem;
	int progress = 0;
	int retry = MEM_CGROUP_RECLAIM_RETRIES;

	if (mem_cgroup_subsys.disabled)
		return 0;
	if (!mm)
		return 0;

	rcu_read_lock();
	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
	css_get(&mem->css);
	rcu_read_unlock();

	do {
		progress = try_to_free_mem_cgroup_pages(mem, gfp_mask);
	} while (!progress && --retry);

	css_put(&mem->css);
	if (!retry)
		return -ENOMEM;
	return 0;
}

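/*
 * Set a new limit on the cgroup's res_counter, reclaiming pages from the
 * group until the limit can be set or the retries are exhausted.
 */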
int mem_cgroup_resize_limit(struct mem_cgroup *memcg, unsigned long long val)
{

	int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
	int progress;
	int ret = 0;

	while (res_counter_set_limit(&memcg->res, val)) {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		if (!retry_count) {
			ret = -EBUSY;
			break;
		}
		progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL);
		if (!progress)
			retry_count--;
	}
	return ret;
}


/*
 * This routine traverse page_cgroup in given list and drop them all.
 * *And* this routine doesn't reclaim page itself, just removes page_cgroup.
 */
#define FORCE_UNCHARGE_BATCH	(128)
static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
			    struct mem_cgroup_per_zone *mz,
			    int active)
{
	struct page_cgroup *pc;
	struct page *page;
	int count = FORCE_UNCHARGE_BATCH;
	unsigned long flags;
	struct list_head *list;

	if (active)
		list = &mz->active_list;
	else
		list = &mz->inactive_list;

	spin_lock_irqsave(&mz->lru_lock, flags);
	while (!list_empty(list)) {
		pc = list_entry(list->prev, struct page_cgroup, lru);
		page = pc->page;
		get_page(page);
		spin_unlock_irqrestore(&mz->lru_lock, flags);
		/*
		 * Check if this page is on LRU. !LRU page can be found
		 * if it's under page migration.
		 */
		if (PageLRU(page)) {
			__mem_cgroup_uncharge_common(page,
					MEM_CGROUP_CHARGE_TYPE_FORCE);
			put_page(page);
			if (--count <= 0) {
				count = FORCE_UNCHARGE_BATCH;
				cond_resched();
			}
		} else
			cond_resched();
		spin_lock_irqsave(&mz->lru_lock, flags);
	}
	spin_unlock_irqrestore(&mz->lru_lock, flags);
}

/*
 * make mem_cgroup's charge to be 0 if there is no task.
 * This enables deleting this mem_cgroup.
 */
static int mem_cgroup_force_empty(struct mem_cgroup *mem)
{
	int ret = -EBUSY;
	int node, zid;

	css_get(&mem->css);
	/*
	 * page reclaim code (kswapd etc..) will move pages between
	 * active_list <-> inactive_list while we don't take a lock.
	 * So, we have to do loop here until all lists are empty.
	 */
	while (mem->res.usage > 0) {
		if (atomic_read(&mem->css.cgroup->count) > 0)
			goto out;
		for_each_node_state(node, N_POSSIBLE)
			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
				struct mem_cgroup_per_zone *mz;
				mz = mem_cgroup_zoneinfo(mem, node, zid);
				/* drop all page_cgroup in active_list */
				mem_cgroup_force_empty_list(mem, mz, 1);
				/* drop all page_cgroup in inactive_list */
				mem_cgroup_force_empty_list(mem, mz, 0);
			}
	}
	ret = 0;
out:
	css_put(&mem->css);
	return ret;
}

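/*
 * cgroup read callback: report the res_counter field selected by
 * cft->private.
 */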
static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
{
922 923
	return res_counter_read_u64(&mem_cgroup_from_cont(cont)->res,
				    cft->private);
}
/*
 * The user of this function is...
 * RES_LIMIT.
 */
static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
			    const char *buffer)
{
932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
	unsigned long long val;
	int ret;

	switch (cft->private) {
	case RES_LIMIT:
		/* This function does all necessary parse...reuse it */
		ret = res_counter_memparse_write_strategy(buffer, &val);
		if (!ret)
			ret = mem_cgroup_resize_limit(memcg, val);
		break;
	default:
		ret = -EINVAL; /* should be BUG() ? */
		break;
	}
	return ret;
}

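/*
 * cgroup trigger callback: reset the max-usage watermark or the failure
 * counter, depending on the event.
 */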
static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
{
	struct mem_cgroup *mem;

	mem = mem_cgroup_from_cont(cont);
	switch (event) {
	case RES_MAX_USAGE:
		res_counter_reset_max(&mem->res);
		break;
	case RES_FAILCNT:
		res_counter_reset_failcnt(&mem->res);
		break;
	}
	return 0;
}

static int mem_force_empty_write(struct cgroup *cont, unsigned int event)
{
	return mem_cgroup_force_empty(mem_cgroup_from_cont(cont));
}

static const struct mem_cgroup_stat_desc {
	const char *msg;
	u64 unit;
} mem_cgroup_stat_desc[] = {
	[MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
	[MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
	[MEM_CGROUP_STAT_PGPGIN_COUNT] = {"pgpgin", 1, },
	[MEM_CGROUP_STAT_PGPGOUT_COUNT] = {"pgpgout", 1, },
};

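/*
 * cgroup read_map callback: report the per-cgroup statistics and the sizes
 * of the active and inactive lists.
 */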
static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
	struct mem_cgroup_stat *stat = &mem_cont->stat;
	int i;

	for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) {
		s64 val;

		val = mem_cgroup_read_stat(stat, i);
		val *= mem_cgroup_stat_desc[i].unit;
		cb->fill(cb, mem_cgroup_stat_desc[i].msg, val);
	}
	/* showing # of active pages */
	{
		unsigned long active, inactive;

		inactive = mem_cgroup_get_all_zonestat(mem_cont,
						MEM_CGROUP_ZSTAT_INACTIVE);
		active = mem_cgroup_get_all_zonestat(mem_cont,
						MEM_CGROUP_ZSTAT_ACTIVE);
		cb->fill(cb, "active", (active) * PAGE_SIZE);
		cb->fill(cb, "inactive", (inactive) * PAGE_SIZE);
	}
	return 0;
}

static struct cftype mem_cgroup_files[] = {
	{
		.name = "usage_in_bytes",
		.private = RES_USAGE,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "max_usage_in_bytes",
		.private = RES_MAX_USAGE,
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "limit_in_bytes",
		.private = RES_LIMIT,
		.write_string = mem_cgroup_write,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "failcnt",
		.private = RES_FAILCNT,
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "force_empty",
		.trigger = mem_force_empty_write,
	},
	{
		.name = "stat",
		.read_map = mem_control_stat_show,
	},
};

static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup_per_zone *mz;
	int zone, tmp = node;
	/*
	 * This routine is called against possible nodes.
	 * But it's BUG to call kmalloc() against offline node.
	 *
	 * TODO: this routine can waste much memory for nodes which will
	 *       never be onlined. It's better to use memory hotplug callback
	 *       function.
	 */
	if (!node_state(node, N_NORMAL_MEMORY))
		tmp = -1;
	pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
	if (!pn)
		return 1;

	mem->info.nodeinfo[node] = pn;
	memset(pn, 0, sizeof(*pn));

	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
		mz = &pn->zoneinfo[zone];
		INIT_LIST_HEAD(&mz->active_list);
		INIT_LIST_HEAD(&mz->inactive_list);
		spin_lock_init(&mz->lru_lock);
	}
	return 0;
}

static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
	kfree(mem->info.nodeinfo[node]);
}

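/*
 * The mem_cgroup structure carries a per-cpu statistics array and can be
 * larger than a page on large NR_CPUS configurations, so fall back to
 * vmalloc() when kmalloc() is not suitable.
 */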
static struct mem_cgroup *mem_cgroup_alloc(void)
{
	struct mem_cgroup *mem;

	if (sizeof(*mem) < PAGE_SIZE)
		mem = kmalloc(sizeof(*mem), GFP_KERNEL);
	else
		mem = vmalloc(sizeof(*mem));

	if (mem)
		memset(mem, 0, sizeof(*mem));
	return mem;
}

static void mem_cgroup_free(struct mem_cgroup *mem)
{
	if (sizeof(*mem) < PAGE_SIZE)
		kfree(mem);
	else
		vfree(mem);
}


static struct cgroup_subsys_state *
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct mem_cgroup *mem;
	int node;

	if (unlikely((cont->parent) == NULL)) {
		mem = &init_mem_cgroup;
		page_cgroup_cache = KMEM_CACHE(page_cgroup, SLAB_PANIC);
	} else {
		mem = mem_cgroup_alloc();
		if (!mem)
			return ERR_PTR(-ENOMEM);
	}

	res_counter_init(&mem->res);

	for_each_node_state(node, N_POSSIBLE)
		if (alloc_mem_cgroup_per_zone_info(mem, node))
			goto free_out;

	return &mem->css;
free_out:
	for_each_node_state(node, N_POSSIBLE)
		free_mem_cgroup_per_zone_info(mem, node);
	if (cont->parent != NULL)
		mem_cgroup_free(mem);
	return ERR_PTR(-ENOMEM);
}

static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
					struct cgroup *cont)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
	mem_cgroup_force_empty(mem);
}

static void mem_cgroup_destroy(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	int node;
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);

	for_each_node_state(node, N_POSSIBLE)
		free_mem_cgroup_per_zone_info(mem, node);

	mem_cgroup_free(mem_cgroup_from_cont(cont));
}

static int mem_cgroup_populate(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	return cgroup_add_files(cont, ss, mem_cgroup_files,
					ARRAY_SIZE(mem_cgroup_files));
}

B
				struct cgroup *cont,
				struct cgroup *old_cont,
				struct task_struct *p)
{
	struct mm_struct *mm;
	struct mem_cgroup *mem, *old_mem;

	mm = get_task_mm(p);
	if (mm == NULL)
		return;

	mem = mem_cgroup_from_cont(cont);
	old_mem = mem_cgroup_from_cont(old_cont);

	/*
	 * Only thread group leaders are allowed to migrate, the mm_struct is
	 * in effect owned by the leader
	 */
	if (!thread_group_leader(p))
		goto out;

out:
	mmput(mm);
}

struct cgroup_subsys mem_cgroup_subsys = {
	.name = "memory",
	.subsys_id = mem_cgroup_subsys_id,
	.create = mem_cgroup_create,
	.pre_destroy = mem_cgroup_pre_destroy,
	.destroy = mem_cgroup_destroy,
	.populate = mem_cgroup_populate,
	.attach = mem_cgroup_move_task,
	.early_init = 0,
};