提交 7e924aaf 编写于 作者: H Hugh Dickins 提交者: Linus Torvalds

memcg: mem_cgroup_charge never NULL

My memcgroup patch to fix hang with shmem/tmpfs added NULL page handling to
mem_cgroup_charge_common.  It seemed convenient at the time, but hard to
justify now: there's a perfectly appropriate swappage to charge and uncharge
instead, this is not on any hot path through shmem_getpage, and no performance
hit was observed from the slight extra overhead.

So revert that NULL page handling from mem_cgroup_charge_common; and make it
clearer by bringing page_cgroup_assign_new_page_cgroup into its body - that
was a helper I found more of a hindrance to understanding.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: David Rientjes <rientjes@google.com>
Acked-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hirokazu Takahashi <taka@valinux.co.jp>
Cc: YAMAMOTO Takashi <yamamoto@valinux.co.jp>
Cc: Paul Menage <menage@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
上级 9442ec9d
...@@ -300,25 +300,6 @@ static void __always_inline unlock_page_cgroup(struct page *page) ...@@ -300,25 +300,6 @@ static void __always_inline unlock_page_cgroup(struct page *page)
bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup); bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
} }
/*
* Tie new page_cgroup to struct page under lock_page_cgroup()
* This can fail if the page has been tied to a page_cgroup.
* If success, returns 0.
*/
static int page_cgroup_assign_new_page_cgroup(struct page *page,
struct page_cgroup *pc)
{
int ret = 0;
lock_page_cgroup(page);
if (!page_get_page_cgroup(page))
page_assign_page_cgroup(page, pc);
else /* A page is tied to other pc. */
ret = 1;
unlock_page_cgroup(page);
return ret;
}
/* /*
* Clear page->page_cgroup member under lock_page_cgroup(). * Clear page->page_cgroup member under lock_page_cgroup().
* If given "pc" value is different from one page->page_cgroup, * If given "pc" value is different from one page->page_cgroup,
...@@ -585,26 +566,24 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, ...@@ -585,26 +566,24 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
* with it * with it
*/ */
retry: retry:
if (page) { lock_page_cgroup(page);
lock_page_cgroup(page); pc = page_get_page_cgroup(page);
pc = page_get_page_cgroup(page); /*
/* * The page_cgroup exists and
* The page_cgroup exists and * the page has already been accounted.
* the page has already been accounted. */
*/ if (pc) {
if (pc) { if (unlikely(!atomic_inc_not_zero(&pc->ref_cnt))) {
if (unlikely(!atomic_inc_not_zero(&pc->ref_cnt))) { /* this page is under being uncharged ? */
/* this page is under being uncharged ? */ unlock_page_cgroup(page);
unlock_page_cgroup(page); cpu_relax();
cpu_relax(); goto retry;
goto retry; } else {
} else { unlock_page_cgroup(page);
unlock_page_cgroup(page); goto done;
goto done;
}
} }
unlock_page_cgroup(page);
} }
unlock_page_cgroup(page);
pc = kzalloc(sizeof(struct page_cgroup), gfp_mask); pc = kzalloc(sizeof(struct page_cgroup), gfp_mask);
if (pc == NULL) if (pc == NULL)
...@@ -663,7 +642,9 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, ...@@ -663,7 +642,9 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE) if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
pc->flags |= PAGE_CGROUP_FLAG_CACHE; pc->flags |= PAGE_CGROUP_FLAG_CACHE;
if (!page || page_cgroup_assign_new_page_cgroup(page, pc)) { lock_page_cgroup(page);
if (page_get_page_cgroup(page)) {
unlock_page_cgroup(page);
/* /*
* Another charge has been added to this page already. * Another charge has been added to this page already.
* We take lock_page_cgroup(page) again and read * We take lock_page_cgroup(page) again and read
...@@ -672,10 +653,10 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, ...@@ -672,10 +653,10 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
res_counter_uncharge(&mem->res, PAGE_SIZE); res_counter_uncharge(&mem->res, PAGE_SIZE);
css_put(&mem->css); css_put(&mem->css);
kfree(pc); kfree(pc);
if (!page)
goto done;
goto retry; goto retry;
} }
page_assign_page_cgroup(page, pc);
unlock_page_cgroup(page);
mz = page_cgroup_zoneinfo(pc); mz = page_cgroup_zoneinfo(pc);
spin_lock_irqsave(&mz->lru_lock, flags); spin_lock_irqsave(&mz->lru_lock, flags);
......
...@@ -1370,14 +1370,17 @@ static int shmem_getpage(struct inode *inode, unsigned long idx, ...@@ -1370,14 +1370,17 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,
shmem_swp_unmap(entry); shmem_swp_unmap(entry);
spin_unlock(&info->lock); spin_unlock(&info->lock);
unlock_page(swappage); unlock_page(swappage);
page_cache_release(swappage);
if (error == -ENOMEM) { if (error == -ENOMEM) {
/* allow reclaim from this memory cgroup */ /* allow reclaim from this memory cgroup */
error = mem_cgroup_cache_charge(NULL, error = mem_cgroup_cache_charge(swappage,
current->mm, gfp & ~__GFP_HIGHMEM); current->mm, gfp & ~__GFP_HIGHMEM);
if (error) if (error) {
page_cache_release(swappage);
goto failed; goto failed;
}
mem_cgroup_uncharge_page(swappage);
} }
page_cache_release(swappage);
goto repeat; goto repeat;
} }
} else if (sgp == SGP_READ && !filepage) { } else if (sgp == SGP_READ && !filepage) {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册