提交 a3b20702 编写于 作者: S Shakeel Butt 提交者: Yang Yingliang

mm: swap: memcg: fix memcg stats for huge pages

mainline inclusion
from mainline-v5.8-rc1
commit 21e330fc
category: bugfix
bugzilla: 36232
CVE: NA

-------------------------------------------------

The commit 2262185c ("mm: per-cgroup memory reclaim stats") added
PGLAZYFREE, PGACTIVATE & PGDEACTIVATE stats for cgroups but missed
couple of places and PGLAZYFREE missed huge page handling. Fix that.
Also for PGLAZYFREE use the irq-unsafe function to update as the irq is
already disabled.

Fixes: 2262185c ("mm: per-cgroup memory reclaim stats")
Signed-off-by: Shakeel Butt <shakeelb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Link: http://lkml.kernel.org/r/20200527182947.251343-1-shakeelb@google.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
上级 1d864802
@@ -277,6 +277,7 @@ static void __activate_page(struct page *page, struct lruvec *lruvec,
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
 		int file = page_is_file_cache(page);
 		int lru = page_lru_base_type(page);
+		int nr_pages = hpage_nr_pages(page);
 
 		del_page_from_lru_list(page, lruvec, lru);
 		SetPageActive(page);
@@ -284,7 +285,9 @@ static void __activate_page(struct page *page, struct lruvec *lruvec,
 		add_page_to_lru_list(page, lruvec, lru);
 		trace_mm_lru_activate(page);
 
-		__count_vm_events(PGACTIVATE, hpage_nr_pages(page));
+		__count_vm_events(PGACTIVATE, nr_pages);
+		__count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE,
+				     nr_pages);
 		update_page_reclaim_stat(lruvec, file, 1);
 	}
 }
@@ -536,18 +539,21 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
 		__count_vm_events(PGROTATED, nr_pages);
 	}
 
-	if (active)
+	if (active) {
 		__count_vm_events(PGDEACTIVATE, nr_pages);
+		__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
+				     nr_pages);
+	}
 	update_page_reclaim_stat(lruvec, file, 0);
 }
 
 static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
 			    void *arg)
 {
 	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
 	    !PageSwapCache(page) && !PageUnevictable(page)) {
 		bool active = PageActive(page);
+		int nr_pages = hpage_nr_pages(page);
 
 		del_page_from_lru_list(page, lruvec,
 				       LRU_INACTIVE_ANON + active);
@@ -561,8 +567,9 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
 		ClearPageSwapBacked(page);
 		add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);
 
-		__count_vm_events(PGLAZYFREE, hpage_nr_pages(page));
-		count_memcg_page_event(page, PGLAZYFREE);
+		__count_vm_events(PGLAZYFREE, nr_pages);
+		__count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE,
+				     nr_pages);
 		update_page_reclaim_stat(lruvec, 1, 0);
 	}
 }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册