Commit 925368d8 authored by Chen Wandun, committed by Yang Yingliang

mm: add page cache fallback statistic

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SK3S
CVE: NA

--------------------------------

Add a page cache fallback statistic. The counter will eventually
overflow after a long period of use; when that happens it is simply
reset to zero, with no negative effect.
Signed-off-by: Chen Wandun <chenwandun@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent f5c69190
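
The overflow handling described in the commit message reduces to a small
pattern: increment a signed atomic counter, and if the returned value has
wrapped negative, restart from zero. A minimal standalone sketch of that
pattern (illustrative only, not part of the patch; the counter name here
is invented):

    #include <linux/atomic.h>

    /* Signed counter: once an increment wraps past LONG_MAX the result
     * turns negative, which is taken as the cue to reset to zero. */
    static atomic_long_t example_stat = ATOMIC_LONG_INIT(0);

    static void example_stat_inc(void)
    {
            long num = atomic_long_inc_return(&example_stat);

            if (num < 0)    /* wrapped: reset rather than report garbage */
                    atomic_long_set(&example_stat, 0);
    }

Note the reset is racy by design: a concurrent increment between the check
and the set may be lost, which is acceptable for a best-effort statistic.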
@@ -21,6 +21,8 @@ extern bool reliable_allow_fallback;
 extern bool shmem_reliable;
 extern struct percpu_counter reliable_shmem_used_nr_page;
 extern bool pagecache_use_reliable_mem;
+extern atomic_long_t page_cache_fallback;
+extern void page_cache_fallback_inc(gfp_t gfp, struct page *page);
 
 extern void add_reliable_mem_size(long sz);
 extern void mem_reliable_init(bool has_unmirrored_mem,
@@ -132,6 +134,7 @@ static inline bool shmem_reliable_is_enabled(void) { return false; }
 static inline void shmem_reliable_page_counter(struct page *page, int nr_page)
 {
 }
+static inline void page_cache_fallback_inc(gfp_t gfp, struct page *page) {}
 
 static inline bool pagecache_reliable_is_enabled(void) { return false; }
 #endif
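
The empty inline stub above keeps call sites unconditional: when the
feature is compiled out, page_cache_fallback_inc() compiles to nothing.
This is the usual kernel stub pattern, roughly (the guarding config
symbol is not visible in this excerpt; CONFIG_MEMORY_RELIABLE is assumed
here for illustration):

    #ifdef CONFIG_MEMORY_RELIABLE
    extern void page_cache_fallback_inc(gfp_t gfp, struct page *page);
    #else
    static inline void page_cache_fallback_inc(gfp_t gfp, struct page *page) {}
    #endif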
...
@@ -249,10 +249,15 @@ extern struct page *__page_cache_alloc(gfp_t gfp);
 #else
 static inline struct page *__page_cache_alloc(gfp_t gfp)
 {
+        struct page *page;
+
         if (pagecache_reliable_is_enabled())
                 gfp |= ___GFP_RELIABILITY;
 
-        return alloc_pages(gfp, 0);
+        page = alloc_pages(gfp, 0);
+        page_cache_fallback_inc(gfp, page);
+
+        return page;
 }
 #endif
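
For orientation: callers need no change, because page cache allocations
funnel through this helper. In kernels of this vintage, page_cache_alloc()
in the same header is (to the best of my knowledge) simply:

    static inline struct page *page_cache_alloc(struct address_space *x)
    {
            return __page_cache_alloc(mapping_gfp_mask(x));
    }

so every mapping-driven allocation picks up the reliability flag and the
fallback accounting transparently.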
...
@@ -1055,9 +1055,13 @@ struct page *__page_cache_alloc(gfp_t gfp)
                         page = __alloc_pages_node(n, gfp, 0);
                 } while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 
+                page_cache_fallback_inc(gfp, page);
                 return page;
         }
-        return alloc_pages(gfp, 0);
+
+        page = alloc_pages(gfp, 0);
+        page_cache_fallback_inc(gfp, page);
+        return page;
 }
 EXPORT_SYMBOL(__page_cache_alloc);
 #endif
...
@@ -22,6 +22,8 @@ bool shmem_reliable __read_mostly = true;
 struct percpu_counter reliable_shmem_used_nr_page __read_mostly;
 bool pagecache_use_reliable_mem __read_mostly = true;
+atomic_long_t page_cache_fallback = ATOMIC_LONG_INIT(0);
+
 void add_reliable_mem_size(long sz)
 {
         atomic_long_add(sz, &total_reliable_mem);
@@ -32,6 +34,34 @@ bool page_reliable(struct page *page)
         return mem_reliable_is_enabled() && page_zonenum(page) < ZONE_MOVABLE;
 }
 
+static bool is_fallback_page(gfp_t gfp, struct page *page)
+{
+        bool ret = false;
+
+        if (!page)
+                return ret;
+
+        if ((gfp & ___GFP_RELIABILITY) && !page_reliable(page))
+                ret = true;
+
+        return ret;
+}
+
+void page_cache_fallback_inc(gfp_t gfp, struct page *page)
+{
+        long num;
+
+        if (!pagecache_reliable_is_enabled())
+                return;
+
+        if (!is_fallback_page(gfp, page))
+                return;
+
+        num = atomic_long_inc_return(&page_cache_fallback);
+
+        if (num < 0)
+                atomic_long_set(&page_cache_fallback, 0);
+}
+
 static int reliable_mem_notifier(struct notifier_block *nb,
                                  unsigned long action, void *arg)
 {
...
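
This excerpt adds the counter but does not show how it is surfaced to
userspace. A hypothetical reader (function name and hook point invented
for illustration; seq_printf() and atomic_long_read() are real kernel
APIs) could be as simple as:

    /* e.g. a seq_file show handler wired up to procfs */
    static int page_cache_fallback_show(struct seq_file *m, void *v)
    {
            seq_printf(m, "PageCacheFallback: %ld\n",
                       atomic_long_read(&page_cache_fallback));
            return 0;
    }

Because the counter resets on overflow, any reader should treat the value
as best-effort, monotonic only between resets.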