提交 5ff32058 编写于 作者: C Chen Wandun 提交者: Yang Yingliang

mm: fix statistic of ReliableFileCache in /proc/meminfo

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SK3S
CVE: NA

--------------------------------

Reliable page cache should be accounted in update_lru_sizes and
move_active_pages_to_lru.

update_lru_sizes is used to account pages after they are isolated
from the LRU in batch.

move_active_pages_to_lru will add pages to the LRU, so the statistics
should be updated synchronously.
Signed-off-by: Chen Wandun <chenwandun@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
上级 f7664d01
...@@ -40,6 +40,8 @@ extern bool mem_reliable_status(void); ...@@ -40,6 +40,8 @@ extern bool mem_reliable_status(void);
extern void page_cache_reliable_lru_add(enum lru_list lru, struct page *page, extern void page_cache_reliable_lru_add(enum lru_list lru, struct page *page,
int val); int val);
extern void page_cache_prepare_alloc(gfp_t *gfp); extern void page_cache_prepare_alloc(gfp_t *gfp);
extern void page_cache_reliable_lru_add_batch(int zid, enum lru_list lru,
int val);
static inline bool mem_reliable_is_enabled(void) static inline bool mem_reliable_is_enabled(void)
{ {
...@@ -174,6 +176,8 @@ static inline void page_cache_reliable_lru_add(enum lru_list lru, ...@@ -174,6 +176,8 @@ static inline void page_cache_reliable_lru_add(enum lru_list lru,
struct page *page, struct page *page,
int val) {} int val) {}
static inline void page_cache_prepare_alloc(gfp_t *gfp) {} static inline void page_cache_prepare_alloc(gfp_t *gfp) {}
static inline void page_cache_reliable_lru_add_batch(int zid, enum lru_list lru,
int val) {}
#endif #endif
#endif #endif
...@@ -64,12 +64,25 @@ static bool reliable_and_lru_check(enum lru_list lru, struct page *page) ...@@ -64,12 +64,25 @@ static bool reliable_and_lru_check(enum lru_list lru, struct page *page)
if (!page_reliable(page)) if (!page_reliable(page))
return false; return false;
if (lru != LRU_ACTIVE_FILE && lru != LRU_INACTIVE_FILE) if (!is_file_lru(lru))
return false; return false;
return true; return true;
} }
/*
 * Batch-update the reliable page cache counter for pages moved on/off an LRU.
 *
 * @zid: zone index of the pages (caller passes one zid per update_lru_sizes
 *       batch); out-of-range values are ignored defensively.
 * @lru: the LRU list the pages belong to; only file LRUs are counted.
 * @val: signed page delta — negative when pages are isolated from the LRU,
 *       positive when they are added back.
 *
 * Unlike page_cache_reliable_lru_add(), this does not inspect individual
 * pages: pages in zones below ZONE_MOVABLE are treated as reliable.
 * NOTE(review): this assumes all non-movable zones hold reliable memory —
 * confirm against the mem_reliable zone layout.
 */
void page_cache_reliable_lru_add_batch(int zid, enum lru_list lru,
				       int val)
{
	/* Nothing to account when the reliable-memory feature is off. */
	if (!mem_reliable_is_enabled())
		return;

	/* Defensive range check; silently ignore bogus zone indices. */
	if (zid < 0 || zid >= MAX_NR_ZONES)
		return;

	/* Only file-backed pages in reliable (non-movable) zones count. */
	if (zid < ZONE_MOVABLE && is_file_lru(lru))
		this_cpu_add(pagecache_reliable_pages, val);
}
void page_cache_reliable_lru_add(enum lru_list lru, struct page *page, int val) void page_cache_reliable_lru_add(enum lru_list lru, struct page *page, int val)
{ {
if (!reliable_and_lru_check(lru, page)) if (!reliable_and_lru_check(lru, page))
...@@ -177,7 +190,7 @@ static void show_val_kb(struct seq_file *m, const char *s, unsigned long num) ...@@ -177,7 +190,7 @@ static void show_val_kb(struct seq_file *m, const char *s, unsigned long num)
void reliable_report_meminfo(struct seq_file *m) void reliable_report_meminfo(struct seq_file *m)
{ {
bool pagecache_enabled = pagecache_reliable_is_enabled(); bool pagecache_enabled = pagecache_reliable_is_enabled();
unsigned long nr_pagecache_pages = 0; long nr_pagecache_pages = 0;
long nr_buddy_pages = 0; long nr_buddy_pages = 0;
int cpu; int cpu;
......
...@@ -1683,6 +1683,8 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec, ...@@ -1683,6 +1683,8 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
#ifdef CONFIG_MEMCG #ifdef CONFIG_MEMCG
mem_cgroup_update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]); mem_cgroup_update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
#endif #endif
page_cache_reliable_lru_add_batch(zid, lru,
-nr_zone_taken[zid]);
} }
} }
...@@ -2099,6 +2101,8 @@ static unsigned move_active_pages_to_lru(struct lruvec *lruvec, ...@@ -2099,6 +2101,8 @@ static unsigned move_active_pages_to_lru(struct lruvec *lruvec,
update_lru_size(lruvec, lru, page_zonenum(page), nr_pages); update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
list_move(&page->lru, &lruvec->lists[lru]); list_move(&page->lru, &lruvec->lists[lru]);
page_cache_reliable_lru_add(lru, page, nr_pages);
if (put_page_testzero(page)) { if (put_page_testzero(page)) {
__ClearPageLRU(page); __ClearPageLRU(page);
__ClearPageActive(page); __ClearPageActive(page);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册