Commit 343493ab authored by Yang Shi, committed by Yang Yingliang

mm: move mem_cgroup_uncharge out of __page_cache_release()

mainline inclusion
from mainline-v5.4-rc1
commit 7ae88534
category: bugfix
bugzilla: 47240
CVE: NA

-------------------------------------------------

A later patch makes the THP deferred split shrinker memcg aware, but it
needs the page->mem_cgroup information in the THP destructor, which is
currently called after mem_cgroup_uncharge().

So move mem_cgroup_uncharge() from __page_cache_release() to the compound
page destructor, which is used by both THP and other compound pages except
HugeTLB, and call it from __put_single_page() for order-0 pages.
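
For reference, a condensed sketch of where the uncharge calls land after
this change (summarizing the hunks below; not the verbatim kernel sources):

	/* Order-0 pages: uncharge once the page is off the LRU. */
	static void __put_single_page(struct page *page)
	{
		__page_cache_release(page);	/* no longer uncharges */
		mem_cgroup_uncharge(page);
		free_unref_page(page);
	}

	/* Compound pages (THP and others, but not HugeTLB, which has
	 * its own destructor): uncharge in the common destructor, so
	 * page->mem_cgroup is still valid while the THP destructor runs.
	 */
	void free_compound_page(struct page *page)
	{
		mem_cgroup_uncharge(page);
		__free_pages_ok(page, compound_order(page));
	}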

Link: http://lkml.kernel.org/r/1565144277-36240-3-git-send-email-yang.shi@linux.alibaba.com
Signed-off-by: Yang Shi <yang.shi@linux.alibaba.com>
Suggested-by: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reviewed-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Qian Cai <cai@lca.pw>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
Parent 51c0cd55
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -593,6 +593,7 @@ static void bad_page(struct page *page, const char *reason,
 void free_compound_page(struct page *page)
 {
+	mem_cgroup_uncharge(page);
 	__free_pages_ok(page, compound_order(page));
 }
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -71,12 +71,12 @@ static void __page_cache_release(struct page *page)
 		spin_unlock_irqrestore(zone_lru_lock(zone), flags);
 	}
 	__ClearPageWaiters(page);
-	mem_cgroup_uncharge(page);
 }
 
 static void __put_single_page(struct page *page)
 {
 	__page_cache_release(page);
+	mem_cgroup_uncharge(page);
 	free_unref_page(page);
 }
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1475,10 +1475,9 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 * Is there need to periodically free_page_list? It would
 		 * appear not as the counts should be low
 		 */
-		if (unlikely(PageTransHuge(page))) {
-			mem_cgroup_uncharge(page);
+		if (unlikely(PageTransHuge(page)))
 			(*get_compound_page_dtor(page))(page);
-		} else
+		else
 			list_add(&page->lru, &free_pages);
 
 		continue;
@@ -1876,7 +1875,6 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
 		if (unlikely(PageCompound(page))) {
 			spin_unlock_irq(&pgdat->lru_lock);
-			mem_cgroup_uncharge(page);
 			(*get_compound_page_dtor(page))(page);
 			spin_lock_irq(&pgdat->lru_lock);
 		} else