commit 7a81b88c
Author: KAMEZAWA Hiroyuki
Committer: Linus Torvalds

memcg: introduce charge-commit-cancel style of functions

There is a small race in do_swap_page().  When the swapped-in page is
charged, its mapcount can be greater than 0.  But at the same time, some
process that shares the page can call unmap, dropping the mapcount from
1 to 0, at which point the page is uncharged.

      CPUA                          CPUB
       mapcount == 1.
   (1) charge if mapcount==0       zap_pte_range()
                                   (2) mapcount 1 => 0.
                                   (3) uncharge(). (success)
   (4) set page's rmap()
       mapcount 0=>1

As a result, the accounting for this swap page is leaked: the page ends up
mapped but uncharged.
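
In code terms, the old single-step charge looked roughly like this (a
simplified sketch; the page_mapped() test is the "charge if mapcount==0"
step above, and the same test survives in mem_cgroup_charge_migrate_fixup()
below):

	/* old behavior (simplified): skip the charge if already mapped */
	if (page_mapped(page) || (page->mapping && !PageAnon(page)))
		return 0;	/* assume another mapper holds the charge */
	...
	/* <-- race window: a sharer can unmap here, dropping mapcount
	 *     1 -> 0 and uncharging the page */
	page_add_anon_rmap(page, vma, address);	/* mapcount 0 -> 1, no charge */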

To fix this, I added a new interface:
  - charge
    account PAGE_SIZE to the res_counter and try to free pages if necessary.
  - commit
    register the page_cgroup and add it to the LRU if necessary.
  - cancel
    uncharge PAGE_SIZE on do_swap_page() failure.

     CPUA
  (1) charge (always)
  (2) set page's rmap (mapcount > 0)
  (3) commit the charge after set_pte(); at commit time it is decided
      whether the charge was actually needed.

This protocol uses the PCG_USED bit on page_cgroup to avoid over-accounting.
The usual mem_cgroup_charge_common() does charge -> commit in one step.
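
A minimal caller-side sketch of the protocol, mirroring the do_swap_page()
changes in this patch (locking and error paths trimmed):

	struct mem_cgroup *ptr = NULL;

	/* (1) charge: always reserve PAGE_SIZE against the mm's cgroup */
	if (mem_cgroup_try_charge(mm, GFP_KERNEL, &ptr) == -ENOMEM)
		return VM_FAULT_OOM;
	...
	set_pte_at(mm, address, page_table, pte);
	page_add_anon_rmap(page, vma, address);	/* (2) mapcount 0 -> 1 */
	/* (3) commit: sets PCG_USED; if another path committed first,
	 * the duplicate charge is handed back inside the commit */
	mem_cgroup_commit_charge_swapin(page, ptr);

On the failure path, mem_cgroup_cancel_charge_swapin(ptr) gives the
reserved PAGE_SIZE back.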

This patch also adds the following functions to clarify all charge points.

  - mem_cgroup_newpage_charge() .... replacement for mem_cgroup_charge(),
	called against newly allocated anon pages.

  - mem_cgroup_charge_migrate_fixup()
	called only from remove_migration_ptes().
	We will have to rewrite this later (this patch just keeps the old
	behavior); the function will be removed by an additional patch to
	make migration clearer.

This is good for clarifying "what we do".

We then have the following 4 charge points:
  - newpage
  - swap-in
  - add-to-cache
  - migration
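
For reference, these map onto the entry points introduced or kept by this
patch as follows:

	newpage      -> mem_cgroup_newpage_charge()
	swap-in      -> mem_cgroup_try_charge() +
	                mem_cgroup_commit_charge_swapin()
	                (mem_cgroup_cancel_charge_swapin() on failure)
	add-to-cache -> mem_cgroup_cache_charge()
	migration    -> mem_cgroup_charge_migrate_fixup()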

[akpm@linux-foundation.org: add missing inline directives to stubs]
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <balbir@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 0b82ac37
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -27,8 +27,17 @@ struct mm_struct;
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
 
-extern int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
+extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
 				gfp_t gfp_mask);
+extern int mem_cgroup_charge_migrate_fixup(struct page *page,
+				struct mm_struct *mm, gfp_t gfp_mask);
+/* for swap handling */
+extern int mem_cgroup_try_charge(struct mm_struct *mm,
+		gfp_t gfp_mask, struct mem_cgroup **ptr);
+extern void mem_cgroup_commit_charge_swapin(struct page *page,
+					struct mem_cgroup *ptr);
+extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr);
+
 extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 					gfp_t gfp_mask);
 extern void mem_cgroup_move_lists(struct page *page, enum lru_list lru);
@@ -71,7 +80,9 @@ extern long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
 
 #else /* CONFIG_CGROUP_MEM_RES_CTLR */
-static inline int mem_cgroup_charge(struct page *page,
+struct mem_cgroup;
+
+static inline int mem_cgroup_newpage_charge(struct page *page,
 					struct mm_struct *mm, gfp_t gfp_mask)
 {
 	return 0;
@@ -83,6 +94,27 @@ static inline int mem_cgroup_cache_charge(struct page *page,
 	return 0;
 }
 
+static inline int mem_cgroup_charge_migrate_fixup(struct page *page,
+					struct mm_struct *mm, gfp_t gfp_mask)
+{
+	return 0;
+}
+
+static inline int mem_cgroup_try_charge(struct mm_struct *mm,
+			gfp_t gfp_mask, struct mem_cgroup **ptr)
+{
+	return 0;
+}
+
+static inline void mem_cgroup_commit_charge_swapin(struct page *page,
+					struct mem_cgroup *ptr)
+{
+}
+
+static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr)
+{
+}
+
 static inline void mem_cgroup_uncharge_page(struct page *page)
 {
 }
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -467,35 +467,31 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 	return nr_taken;
 }
 
-/*
- * Charge the memory controller for page usage.
- * Return
- * 0 if the charge was successful
- * < 0 if the cgroup is over its limit
+/**
+ * mem_cgroup_try_charge - get charge of PAGE_SIZE.
+ * @mm: an mm_struct which is charged against. (when *memcg is NULL)
+ * @gfp_mask: gfp_mask for reclaim.
+ * @memcg: a pointer to memory cgroup which is charged against.
+ *
+ * charge against memory cgroup pointed by *memcg. if *memcg == NULL, estimated
+ * memory cgroup from @mm is got and stored in *memcg.
+ *
+ * Returns 0 if success. -ENOMEM at failure.
  */
-static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
-				gfp_t gfp_mask, enum charge_type ctype,
-				struct mem_cgroup *memcg)
+
+int mem_cgroup_try_charge(struct mm_struct *mm,
+			gfp_t gfp_mask, struct mem_cgroup **memcg)
 {
 	struct mem_cgroup *mem;
-	struct page_cgroup *pc;
-	unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
-	struct mem_cgroup_per_zone *mz;
-	unsigned long flags;
+	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
 
-	pc = lookup_page_cgroup(page);
-	/* can happen at boot */
-	if (unlikely(!pc))
-		return 0;
-	prefetchw(pc);
 	/*
 	 * We always charge the cgroup the mm_struct belongs to.
 	 * The mm_struct's mem_cgroup changes on task migration if the
 	 * thread group leader migrates. It's possible that mm is not
 	 * set, if so charge the init_mm (happens for pagecache usage).
 	 */
-
-	if (likely(!memcg)) {
+	if (likely(!*memcg)) {
 		rcu_read_lock();
 		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
 		if (unlikely(!mem)) {
@@ -506,15 +502,17 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 		 * For every charge from the cgroup, increment reference count
 		 */
 		css_get(&mem->css);
+		*memcg = mem;
 		rcu_read_unlock();
 	} else {
-		mem = memcg;
-		css_get(&memcg->css);
+		mem = *memcg;
+		css_get(&mem->css);
 	}
 
 	while (unlikely(res_counter_charge(&mem->res, PAGE_SIZE))) {
 		if (!(gfp_mask & __GFP_WAIT))
-			goto out;
+			goto nomem;
 
 		if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
 			continue;
@@ -531,18 +529,37 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 		if (!nr_retries--) {
 			mem_cgroup_out_of_memory(mem, gfp_mask);
-			goto out;
+			goto nomem;
 		}
 	}
+	return 0;
+nomem:
+	css_put(&mem->css);
+	return -ENOMEM;
+}
+
+/*
+ * commit a charge got by mem_cgroup_try_charge() and makes page_cgroup to be
+ * USED state. If already USED, uncharge and return.
+ */
+
+static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
+				     struct page_cgroup *pc,
+				     enum charge_type ctype)
+{
+	struct mem_cgroup_per_zone *mz;
+	unsigned long flags;
+
+	/* try_charge() can return NULL to *memcg, taking care of it. */
+	if (!mem)
+		return;
 
 	lock_page_cgroup(pc);
 	if (unlikely(PageCgroupUsed(pc))) {
 		unlock_page_cgroup(pc);
 		res_counter_uncharge(&mem->res, PAGE_SIZE);
 		css_put(&mem->css);
-
-		goto done;
+		return;
 	}
 	pc->mem_cgroup = mem;
 	/*
@@ -557,15 +574,39 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 	__mem_cgroup_add_list(mz, pc);
 	spin_unlock_irqrestore(&mz->lru_lock, flags);
 	unlock_page_cgroup(pc);
+}
 
-done:
+/*
+ * Charge the memory controller for page usage.
+ * Return
+ * 0 if the charge was successful
+ * < 0 if the cgroup is over its limit
+ */
+static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
+				gfp_t gfp_mask, enum charge_type ctype,
+				struct mem_cgroup *memcg)
+{
+	struct mem_cgroup *mem;
+	struct page_cgroup *pc;
+	int ret;
+
+	pc = lookup_page_cgroup(page);
+	/* can happen at boot */
+	if (unlikely(!pc))
+		return 0;
+	prefetchw(pc);
+	mem = memcg;
+	ret = mem_cgroup_try_charge(mm, gfp_mask, &mem);
+	if (ret)
+		return ret;
+	__mem_cgroup_commit_charge(mem, pc, ctype);
 	return 0;
-out:
-	css_put(&mem->css);
-	return -ENOMEM;
 }
 
-int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
+int mem_cgroup_newpage_charge(struct page *page,
+			      struct mm_struct *mm, gfp_t gfp_mask)
 {
 	if (mem_cgroup_subsys.disabled)
 		return 0;
@@ -586,6 +627,34 @@ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
 			MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
 }
 
+/*
+ * same as mem_cgroup_newpage_charge(), now.
+ * But what we assume is different from newpage, and this is special case.
+ * treat this in special function. easy for maintenance.
+ */
+
+int mem_cgroup_charge_migrate_fixup(struct page *page,
+				struct mm_struct *mm, gfp_t gfp_mask)
+{
+	if (mem_cgroup_subsys.disabled)
+		return 0;
+
+	if (PageCompound(page))
+		return 0;
+
+	if (page_mapped(page) || (page->mapping && !PageAnon(page)))
+		return 0;
+
+	if (unlikely(!mm))
+		mm = &init_mm;
+
+	return mem_cgroup_charge_common(page, mm, gfp_mask,
+				MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
+}
+
 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 				gfp_t gfp_mask)
 {
@@ -628,6 +697,30 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 				MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL);
 }
 
+void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
+{
+	struct page_cgroup *pc;
+
+	if (mem_cgroup_subsys.disabled)
+		return;
+	if (!ptr)
+		return;
+	pc = lookup_page_cgroup(page);
+	__mem_cgroup_commit_charge(ptr, pc, MEM_CGROUP_CHARGE_TYPE_MAPPED);
+}
+
+void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
+{
+	if (mem_cgroup_subsys.disabled)
+		return;
+	if (!mem)
+		return;
+	res_counter_uncharge(&mem->res, PAGE_SIZE);
+	css_put(&mem->css);
+}
+
 /*
  * uncharge if !page_mapped(page)
  */
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2000,7 +2000,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	cow_user_page(new_page, old_page, address, vma);
 	__SetPageUptodate(new_page);
 
-	if (mem_cgroup_charge(new_page, mm, GFP_KERNEL))
+	if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
 		goto oom_free_new;
 
 	/*
@@ -2392,6 +2392,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct page *page;
 	swp_entry_t entry;
 	pte_t pte;
+	struct mem_cgroup *ptr = NULL;
 	int ret = 0;
 
 	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
@@ -2430,7 +2431,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	lock_page(page);
 	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 
-	if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
+	if (mem_cgroup_try_charge(mm, GFP_KERNEL, &ptr) == -ENOMEM) {
 		ret = VM_FAULT_OOM;
 		unlock_page(page);
 		goto out;
@@ -2460,6 +2461,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	flush_icache_page(vma, page);
 	set_pte_at(mm, address, page_table, pte);
 	page_add_anon_rmap(page, vma, address);
+	mem_cgroup_commit_charge_swapin(page, ptr);
 
 	swap_free(entry);
 	if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
@@ -2480,7 +2482,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 out:
 	return ret;
 out_nomap:
-	mem_cgroup_uncharge_page(page);
+	mem_cgroup_cancel_charge_swapin(ptr);
 	pte_unmap_unlock(page_table, ptl);
 	unlock_page(page);
 	page_cache_release(page);
@@ -2510,7 +2512,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto oom;
 	__SetPageUptodate(page);
 
-	if (mem_cgroup_charge(page, mm, GFP_KERNEL))
+	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
 		goto oom_free_page;
 
 	entry = mk_pte(page, vma->vm_page_prot);
@@ -2601,7 +2603,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			ret = VM_FAULT_OOM;
 			goto out;
 		}
-		if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
+		if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
 			ret = VM_FAULT_OOM;
 			page_cache_release(page);
 			goto out;
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -133,7 +133,7 @@ static void remove_migration_pte(struct vm_area_struct *vma,
 	 * be reliable, and this charge can actually fail: oh well, we don't
 	 * make the situation any worse by proceeding as if it had succeeded.
 	 */
-	mem_cgroup_charge(new, mm, GFP_ATOMIC);
+	mem_cgroup_charge_migrate_fixup(new, mm, GFP_ATOMIC);
 
 	get_page(new);
 	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -690,17 +690,18 @@ unsigned int count_swap_pages(int type, int free)
 static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long addr, swp_entry_t entry, struct page *page)
 {
+	struct mem_cgroup *ptr = NULL;
 	spinlock_t *ptl;
 	pte_t *pte;
 	int ret = 1;
 
-	if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
+	if (mem_cgroup_try_charge(vma->vm_mm, GFP_KERNEL, &ptr))
 		ret = -ENOMEM;
 
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
 		if (ret > 0)
-			mem_cgroup_uncharge_page(page);
+			mem_cgroup_cancel_charge_swapin(ptr);
 		ret = 0;
 		goto out;
 	}
@@ -710,6 +711,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 	set_pte_at(vma->vm_mm, addr, pte,
 		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
 	page_add_anon_rmap(page, vma, addr);
+	mem_cgroup_commit_charge_swapin(page, ptr);
 	swap_free(entry);
 	/*
 	 * Move the page to the active list so it is not