Commit 2238a7c8 authored by Chen Wandun, committed by Yang Yingliang

mm: add "ReliableFileCache" item in /proc/meminfo

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SK3S
CVE: NA

--------------------------------

Add statistics for the usage of reliable page cache. The new item
"ReliableFileCache" in /proc/meminfo shows the usage of reliable page cache.
Signed-off-by: Chen Wandun <chenwandun@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 7a36955e
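
The accounting is per-CPU: pagecache_reliable_pages is updated from the
file-LRU add/remove hooks added below, and reliable_report_meminfo() sums
it across all possible CPUs when /proc/meminfo is read. As a sketch, the
resulting output should look like the following (values and column padding
are illustrative, not taken from a real system):

    FileCache:         822148 kB
    ReliableFileCache: 405504 kB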
@@ -39,6 +39,9 @@ extern void mem_reliable_out_of_memory(gfp_t gfp_mask, unsigned int order,
int preferred_nid, nodemask_t *nodemask);
extern bool mem_reliable_status(void);
extern void page_cache_reliable_lru_add(enum lru_list lru, struct page *page,
int val);
static inline bool mem_reliable_is_enabled(void)
{
return static_branch_likely(&mem_reliable);
@@ -169,6 +172,9 @@ static inline bool mem_reliable_status(void) { return false; }
static inline void mem_reliable_buddy_counter(struct page *page, int nr_page) {}
static inline bool mem_reliable_watermark_ok(int nr_page) { return true; }
static inline bool mem_reliable_shmem_limit_check(void) { return true; }
static inline void page_cache_reliable_lru_add(enum lru_list lru,
struct page *page,
int val) {}
#endif
#endif
@@ -4,6 +4,7 @@
#include <linux/huge_mm.h>
#include <linux/swap.h>
#include <linux/mem_reliable.h>
/**
* page_is_file_cache - should the page be on a file LRU or anon LRU?
@@ -49,6 +50,8 @@ static __always_inline void add_page_to_lru_list(struct page *page,
{
update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
list_add(&page->lru, &lruvec->lists[lru]);
page_cache_reliable_lru_add(lru, page, hpage_nr_pages(page));
}
static __always_inline void add_page_to_lru_list_tail(struct page *page,
@@ -56,6 +59,7 @@ static __always_inline void add_page_to_lru_list_tail(struct page *page,
{
update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
list_add_tail(&page->lru, &lruvec->lists[lru]);
page_cache_reliable_lru_add(lru, page, hpage_nr_pages(page));
}
static __always_inline void del_page_from_lru_list(struct page *page,
@@ -63,6 +67,7 @@ static __always_inline void del_page_from_lru_list(struct page *page,
{
list_del(&page->lru);
update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page));
page_cache_reliable_lru_add(lru, page, -hpage_nr_pages(page));
}
/**
@@ -36,7 +36,7 @@ long shmem_reliable_nr_page = LONG_MAX;
bool pagecache_use_reliable_mem __read_mostly = true;
atomic_long_t page_cache_fallback = ATOMIC_LONG_INIT(0);
DEFINE_PER_CPU(long, pagecache_reliable_pages);
bool mem_reliable_status(void)
{
return mem_reliable_is_enabled();
@@ -66,6 +66,25 @@ static bool is_fallback_page(gfp_t gfp, struct page *page)
return ret;
}
static bool reliable_and_lru_check(enum lru_list lru, struct page *page)
{
if (!page || !page_reliable(page))
return false;
if (lru != LRU_ACTIVE_FILE && lru != LRU_INACTIVE_FILE)
return false;
return true;
}
void page_cache_reliable_lru_add(enum lru_list lru, struct page *page, int val)
{
if (!reliable_and_lru_check(lru, page))
return;
this_cpu_add(pagecache_reliable_pages, val);
}
void page_cache_fallback_inc(gfp_t gfp, struct page *page)
{
long num;
@@ -196,6 +215,7 @@ void reliable_report_meminfo(struct seq_file *m)
if (pagecache_reliable_is_enabled()) {
unsigned long num = 0;
int cpu;
num += global_node_page_state(NR_LRU_BASE +
LRU_ACTIVE_FILE);
@@ -203,6 +223,13 @@
LRU_INACTIVE_FILE);
seq_printf(m, "FileCache: %8lu kB\n",
num << (PAGE_SHIFT - 10));
num = 0;
for_each_possible_cpu(cpu)
num += per_cpu(pagecache_reliable_pages, cpu);
seq_printf(m, "ReliableFileCache:%8lu kB\n",
num << (PAGE_SHIFT - 10));
}
}
}
@@ -4596,6 +4596,7 @@ static int add_page_for_reclaim_swapcache(struct page *page,
case 0:
list_move(&head->lru, pagelist);
update_lru_size(lruvec, lru, page_zonenum(head), -hpage_nr_pages(head));
page_cache_reliable_lru_add(lru, head, -hpage_nr_pages(head));
break;
case -EBUSY:
list_move(&head->lru, src);
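
For verification, a minimal userspace sketch (not part of this commit) that
reads the new counter; the only assumptions are the "ReliableFileCache"
field name and the kB unit emitted by reliable_report_meminfo() above:

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *fp = fopen("/proc/meminfo", "r");

	if (!fp) {
		perror("fopen /proc/meminfo");
		return 1;
	}
	while (fgets(line, sizeof(line), fp)) {
		unsigned long kb;

		/* Matches the "ReliableFileCache:%8lu kB" format string. */
		if (sscanf(line, "ReliableFileCache:%lu kB", &kb) == 1) {
			printf("reliable page cache: %lu kB\n", kb);
			break;
		}
	}
	fclose(fp);
	return 0;
}

On kernels without this patch, or with reliable page cache disabled, the
loop simply finds no matching line.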