提交 1d864802 编写于 作者: S Shakeel Butt 提交者: Yang Yingliang

mm: swap: fix vmstats for huge pages

mainline inclusion
from mainline-v5.8-rc1
commit 5d91f31f
category: bugfix
bugzilla: 36232
CVE: NA

-------------------------------------------------

Many of the callbacks called by pagevec_lru_move_fn() do not correctly
update the vmstats for huge pages. Fix that. Also __pagevec_lru_add_fn()
uses the irq-unsafe alternative to update the stat as the irqs are
already disabled.
Signed-off-by: Shakeel Butt <shakeelb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Link: http://lkml.kernel.org/r/20200527182916.249910-1-shakeelb@google.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
上级 9bfb914c
...@@ -224,7 +224,7 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec, ...@@ -224,7 +224,7 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
del_page_from_lru_list(page, lruvec, page_lru(page)); del_page_from_lru_list(page, lruvec, page_lru(page));
ClearPageActive(page); ClearPageActive(page);
add_page_to_lru_list_tail(page, lruvec, page_lru(page)); add_page_to_lru_list_tail(page, lruvec, page_lru(page));
(*pgmoved)++; (*pgmoved) += hpage_nr_pages(page);
} }
} }
...@@ -284,7 +284,7 @@ static void __activate_page(struct page *page, struct lruvec *lruvec, ...@@ -284,7 +284,7 @@ static void __activate_page(struct page *page, struct lruvec *lruvec,
add_page_to_lru_list(page, lruvec, lru); add_page_to_lru_list(page, lruvec, lru);
trace_mm_lru_activate(page); trace_mm_lru_activate(page);
__count_vm_event(PGACTIVATE); __count_vm_events(PGACTIVATE, hpage_nr_pages(page));
update_page_reclaim_stat(lruvec, file, 1); update_page_reclaim_stat(lruvec, file, 1);
} }
} }
...@@ -499,6 +499,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec, ...@@ -499,6 +499,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
{ {
int lru, file; int lru, file;
bool active; bool active;
int nr_pages = hpage_nr_pages(page);
if (!PageLRU(page)) if (!PageLRU(page))
return; return;
...@@ -532,11 +533,11 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec, ...@@ -532,11 +533,11 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
* We moves tha page into tail of inactive. * We moves tha page into tail of inactive.
*/ */
list_move_tail(&page->lru, &lruvec->lists[lru]); list_move_tail(&page->lru, &lruvec->lists[lru]);
__count_vm_event(PGROTATED); __count_vm_events(PGROTATED, nr_pages);
} }
if (active) if (active)
__count_vm_event(PGDEACTIVATE); __count_vm_events(PGDEACTIVATE, nr_pages);
update_page_reclaim_stat(lruvec, file, 0); update_page_reclaim_stat(lruvec, file, 0);
} }
...@@ -870,6 +871,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec, ...@@ -870,6 +871,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
{ {
enum lru_list lru; enum lru_list lru;
int was_unevictable = TestClearPageUnevictable(page); int was_unevictable = TestClearPageUnevictable(page);
int nr_pages = hpage_nr_pages(page);
VM_BUG_ON_PAGE(PageLRU(page), page); VM_BUG_ON_PAGE(PageLRU(page), page);
...@@ -907,13 +909,13 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec, ...@@ -907,13 +909,13 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
update_page_reclaim_stat(lruvec, page_is_file_cache(page), update_page_reclaim_stat(lruvec, page_is_file_cache(page),
PageActive(page)); PageActive(page));
if (was_unevictable) if (was_unevictable)
count_vm_event(UNEVICTABLE_PGRESCUED); __count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
} else { } else {
lru = LRU_UNEVICTABLE; lru = LRU_UNEVICTABLE;
ClearPageActive(page); ClearPageActive(page);
SetPageUnevictable(page); SetPageUnevictable(page);
if (!was_unevictable) if (!was_unevictable)
count_vm_event(UNEVICTABLE_PGCULLED); __count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
} }
add_page_to_lru_list(page, lruvec, lru); add_page_to_lru_list(page, lruvec, lru);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册