Commit 6e6cf0d7 authored by Chen Wandun, committed by Wang Wensheng

mm: add "ReliableFileCache" item in /proc/meminfo

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SK3S
CVE: NA

--------------------------------

Add statistics for the usage of reliable page cache. The new item
"ReliableFileCache" in /proc/meminfo shows the usage of the reliable page cache.
Signed-off-by: Chen Wandun <chenwandun@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Parent ccad5e7a
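For reference, a minimal userspace sketch of how the new entry could be read once this
patch is applied. The field name "ReliableFileCache" matches the seq_printf() format
added in reliable_report_meminfo() below; the program itself and the sample value in
the comment are illustrative and not part of the patch.

#include <stdio.h>
#include <string.h>

int main(void)
{
        FILE *fp = fopen("/proc/meminfo", "r");
        char line[256];

        if (!fp)
                return 1;

        /*
         * Print only the item added by this patch, e.g.
         * "ReliableFileCache:   123456 kB" (value is illustrative).
         */
        while (fgets(line, sizeof(line), fp)) {
                if (strncmp(line, "ReliableFileCache:", 18) == 0)
                        fputs(line, stdout);
        }

        fclose(fp);
        return 0;
}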
......@@ -15,6 +15,8 @@ DECLARE_STATIC_KEY_FALSE(mem_reliable);
extern bool reliable_enabled;
extern bool shmem_reliable;
extern bool pagecache_use_reliable_mem;
extern struct percpu_counter pagecache_reliable_pages;
extern struct percpu_counter anon_reliable_pages;
extern void mem_reliable_init(bool has_unmirrored_mem,
                              unsigned long *zone_movable_pfn,
......@@ -23,6 +25,11 @@ extern void shmem_reliable_init(void);
extern void reliable_report_meminfo(struct seq_file *m);
extern void page_cache_prepare_alloc(gfp_t *gfp);
extern bool mem_reliable_status(void);
extern void reliable_lru_add(enum lru_list lru, struct page *page,
                             int val);
extern void reliable_lru_add_batch(int zid, enum lru_list lru,
                                   int val);
extern bool mem_reliable_counter_initialized(void);
static inline bool mem_reliable_is_enabled(void)
{
......@@ -56,6 +63,17 @@ static inline bool shmem_reliable_is_enabled(void)
{
        return shmem_reliable;
}
static inline bool page_reliable(struct page *page)
{
        if (!mem_reliable_is_enabled())
                return false;
        if (!page)
                return false;
        return page_zonenum(page) < ZONE_MOVABLE;
}
#else
#define reliable_enabled 0
#define pagecache_use_reliable_mem 0
......@@ -74,6 +92,12 @@ static inline void reliable_report_meminfo(struct seq_file *m) {}
static inline bool shmem_reliable_is_enabled(void) { return false; }
static inline void page_cache_prepare_alloc(gfp_t *gfp) {}
static inline bool mem_reliable_status(void) { return false; }
static inline bool page_reliable(struct page *page) { return false; }
static inline void reliable_lru_add(enum lru_list lru, struct page *page,
                                    int val) {}
static inline void reliable_lru_add_batch(int zid, enum lru_list lru,
                                          int val) {}
static inline bool mem_reliable_counter_initialized(void) { return false; }
#endif
#endif
......@@ -34,9 +34,6 @@
#include <linux/pgtable.h>
#include <linux/kabi.h>
/* added to mm.h to avoid every caller adding new header file */
#include <linux/mem_reliable.h>
struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
......@@ -3313,5 +3310,8 @@ static inline int seal_check_future_write(int seals, struct vm_area_struct *vma)
        return 0;
}
/* added to mm.h to avoid every caller adding new header file */
#include <linux/mem_reliable.h>
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
......@@ -4,6 +4,7 @@
#include <linux/huge_mm.h>
#include <linux/swap.h>
#include <linux/mem_reliable.h>
/**
* page_is_file_lru - should the page be on a file LRU or anon LRU?
......@@ -50,6 +51,7 @@ static __always_inline void add_page_to_lru_list(struct page *page,
{
        update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
        list_add(&page->lru, &lruvec->lists[lru]);
        reliable_lru_add(lru, page, thp_nr_pages(page));
}
static __always_inline void add_page_to_lru_list_tail(struct page *page,
......@@ -57,6 +59,7 @@ static __always_inline void add_page_to_lru_list_tail(struct page *page,
{
        update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
        list_add_tail(&page->lru, &lruvec->lists[lru]);
        reliable_lru_add(lru, page, thp_nr_pages(page));
}
static __always_inline void del_page_from_lru_list(struct page *page,
......@@ -64,6 +67,7 @@ static __always_inline void del_page_from_lru_list(struct page *page,
{
        list_del(&page->lru);
        update_lru_size(lruvec, lru, page_zonenum(page), -thp_nr_pages(page));
        reliable_lru_add(lru, page, -thp_nr_pages(page));
}
/**
......
......@@ -261,6 +261,11 @@ static inline bool is_file_lru(enum lru_list lru)
        return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}
static inline int is_anon_lru(enum lru_list lru)
{
        return (lru == LRU_INACTIVE_ANON || lru == LRU_ACTIVE_ANON);
}
static inline bool is_active_lru(enum lru_list lru)
{
        return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
......
......@@ -17,6 +17,14 @@ EXPORT_SYMBOL_GPL(mem_reliable);
bool reliable_enabled;
bool shmem_reliable __read_mostly = true;
bool pagecache_use_reliable_mem __read_mostly = true;
struct percpu_counter pagecache_reliable_pages;
struct percpu_counter anon_reliable_pages;
bool mem_reliable_counter_initialized(void)
{
        return likely(percpu_counter_initialized(&pagecache_reliable_pages)) &&
               likely((percpu_counter_initialized(&anon_reliable_pages)));
}
bool mem_reliable_status(void)
{
......@@ -24,6 +32,37 @@ bool mem_reliable_status(void)
}
EXPORT_SYMBOL_GPL(mem_reliable_status);
void reliable_lru_add_batch(int zid, enum lru_list lru,
                            int val)
{
        if (!mem_reliable_is_enabled())
                return;
        if (zid < ZONE_MOVABLE) {
                if (is_file_lru(lru))
                        percpu_counter_add(&pagecache_reliable_pages, val);
                else if (is_anon_lru(lru))
                        percpu_counter_add(&anon_reliable_pages, val);
        }
}
void reliable_lru_add(enum lru_list lru, struct page *page, int val)
{
        if (!page_reliable(page))
                return;
        if (is_file_lru(lru))
                percpu_counter_add(&pagecache_reliable_pages, val);
        else if (is_anon_lru(lru))
                percpu_counter_add(&anon_reliable_pages, val);
        else if (lru == LRU_UNEVICTABLE) {
                if (PageAnon(page))
                        percpu_counter_add(&anon_reliable_pages, val);
                else
                        percpu_counter_add(&pagecache_reliable_pages, val);
        }
}
void page_cache_prepare_alloc(gfp_t *gfp)
{
        if (!mem_reliable_is_enabled())
......@@ -118,14 +157,32 @@ void reliable_report_meminfo(struct seq_file *m)
        show_val_kb(m, "ReliableBuddyMem: ", free_reliable_pages());
        if (pagecache_reliable_is_enabled()) {
                s64 nr_pagecache_pages = 0;
                unsigned long num = 0;
                num += global_node_page_state(NR_LRU_BASE + LRU_ACTIVE_FILE);
                num += global_node_page_state(NR_LRU_BASE + LRU_INACTIVE_FILE);
                show_val_kb(m, "FileCache: ", num);
                nr_pagecache_pages =
                        percpu_counter_sum_positive(&pagecache_reliable_pages);
                seq_printf(m, "ReliableFileCache: %8llu kB\n",
                           nr_pagecache_pages << (PAGE_SHIFT - 10));
        }
}
static int __init reliable_sysctl_init(void)
{
        if (!mem_reliable_is_enabled())
                return 0;
        percpu_counter_init(&pagecache_reliable_pages, 0, GFP_KERNEL);
        percpu_counter_init(&anon_reliable_pages, 0, GFP_KERNEL);
        return 0;
}
arch_initcall(reliable_sysctl_init);
static int __init setup_reliable_debug(char *str)
{
        if (*str++ != '=' || !*str)
......
......@@ -1813,6 +1813,7 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
                        continue;
                update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
                reliable_lru_add_batch(zid, lru, -nr_zone_taken[zid]);
        }
}
......@@ -2082,6 +2083,7 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
                update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
                list_add(&page->lru, &lruvec->lists[lru]);
                reliable_lru_add(lru, page, nr_pages);
                nr_moved += nr_pages;
                if (PageActive(page))
                        workingset_age_nonresident(lruvec, nr_pages);
......