From 925368d878b5c446d9f871796ca27bc0d29102fb Mon Sep 17 00:00:00 2001
From: Chen Wandun
Date: Wed, 9 Feb 2022 15:36:31 +0800
Subject: [PATCH] mm: add page cache fallback statistic

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SK3S
CVE: NA

--------------------------------

Add a page cache fallback statistic. The counter will eventually
overflow after a long period of use; when that happens it is simply
reset to zero, which has no negative effect.

Signed-off-by: Chen Wandun
Reviewed-by: Kefeng Wang
Signed-off-by: Yang Yingliang
---
 include/linux/mem_reliable.h |  3 +++
 include/linux/pagemap.h      |  7 ++++++-
 mm/filemap.c                 |  6 +++++-
 mm/mem_reliable.c            | 30 ++++++++++++++++++++++++++++++
 4 files changed, 44 insertions(+), 2 deletions(-)

diff --git a/include/linux/mem_reliable.h b/include/linux/mem_reliable.h
index 0c5f80428e97..8dbebabc8b28 100644
--- a/include/linux/mem_reliable.h
+++ b/include/linux/mem_reliable.h
@@ -21,6 +21,8 @@ extern bool reliable_allow_fallback;
 extern bool shmem_reliable;
 extern struct percpu_counter reliable_shmem_used_nr_page;
 extern bool pagecache_use_reliable_mem;
+extern atomic_long_t page_cache_fallback;
+extern void page_cache_fallback_inc(gfp_t gfp, struct page *page);
 
 extern void add_reliable_mem_size(long sz);
 extern void mem_reliable_init(bool has_unmirrored_mem,
@@ -132,6 +134,7 @@ static inline bool shmem_reliable_is_enabled(void) { return false; }
 static inline void shmem_reliable_page_counter(struct page *page,
 					       int nr_page) {}
 
+static inline void page_cache_fallback_inc(gfp_t gfp, struct page *page) {}
 static inline bool pagecache_reliable_is_enabled(void) { return false; }
 #endif
 
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 77563c03618c..a7d83fed0601 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -249,10 +249,15 @@ extern struct page *__page_cache_alloc(gfp_t gfp);
 #else
 static inline struct page *__page_cache_alloc(gfp_t gfp)
 {
+	struct page *page;
+
 	if (pagecache_reliable_is_enabled())
 		gfp |= ___GFP_RELIABILITY;
 
-	return alloc_pages(gfp, 0);
+	page = alloc_pages(gfp, 0);
+	page_cache_fallback_inc(gfp, page);
+
+	return page;
 }
 #endif
 
diff --git a/mm/filemap.c b/mm/filemap.c
index 4dc3cc5834a5..320c97244e16 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1055,9 +1055,13 @@ struct page *__page_cache_alloc(gfp_t gfp)
 			page = __alloc_pages_node(n, gfp, 0);
 		} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 
+		page_cache_fallback_inc(gfp, page);
 		return page;
 	}
-	return alloc_pages(gfp, 0);
+	page = alloc_pages(gfp, 0);
+	page_cache_fallback_inc(gfp, page);
+
+	return page;
 }
 EXPORT_SYMBOL(__page_cache_alloc);
 #endif
diff --git a/mm/mem_reliable.c b/mm/mem_reliable.c
index 5a32977b674f..796892d5d056 100644
--- a/mm/mem_reliable.c
+++ b/mm/mem_reliable.c
@@ -22,6 +22,8 @@ bool shmem_reliable __read_mostly = true;
 struct percpu_counter reliable_shmem_used_nr_page __read_mostly;
 bool pagecache_use_reliable_mem __read_mostly = true;
 
+atomic_long_t page_cache_fallback = ATOMIC_LONG_INIT(0);
+
 void add_reliable_mem_size(long sz)
 {
 	atomic_long_add(sz, &total_reliable_mem);
@@ -32,6 +34,34 @@ bool page_reliable(struct page *page)
 	return mem_reliable_is_enabled() && page_zonenum(page) < ZONE_MOVABLE;
 }
 
+static bool is_fallback_page(gfp_t gfp, struct page *page)
+{
+	bool ret = false;
+
+	if (!page)
+		return ret;
+
+	if ((gfp & ___GFP_RELIABILITY) && !page_reliable(page))
+		ret = true;
+
+	return ret;
+}
+
+void page_cache_fallback_inc(gfp_t gfp, struct page *page)
+{
+	long num;
+
+	if (!pagecache_reliable_is_enabled())
+		return;
+
+	if (!is_fallback_page(gfp, page))
+		return;
+
+	num = atomic_long_inc_return(&page_cache_fallback);
+	if (num < 0)
+		atomic_long_set(&page_cache_fallback, 0);
+}
+
 static int reliable_mem_notifier(struct notifier_block *nb,
 				     unsigned long action, void *arg)
 {
--
GitLab
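
A note on the counting scheme in this patch: a page counts as a
"fallback" when the caller asked for reliable memory (___GFP_RELIABILITY
set) but the allocator returned a page outside the reliable zones, and
the counter is deliberately reset to zero once atomic_long_inc_return()
wraps negative. Below is a minimal user-space sketch of that same
reset-on-overflow pattern using C11 atomics; fallback_count and
fallback_inc() are hypothetical stand-ins for illustration, not part of
the kernel patch:

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's page_cache_fallback counter. */
static atomic_long fallback_count = 0;

/*
 * Same end state as the patch's "if (num < 0) reset to 0" check.
 * C11 atomic arithmetic on signed types is defined to wrap (two's
 * complement), so the increment at LONG_MAX is well defined; we test
 * the previous value instead of computing prev + 1 in plain signed
 * arithmetic, which would overflow.
 */
static void fallback_inc(void)
{
	long prev = atomic_fetch_add(&fallback_count, 1);

	/*
	 * prev == LONG_MAX: this increment just wrapped the counter
	 * negative. prev < 0: it wrapped on an earlier increment.
	 * Either way, restart from zero.
	 */
	if (prev == LONG_MAX || prev < 0)
		atomic_store(&fallback_count, 0);
}

int main(void)
{
	/* Force a wrap by starting at the overflow boundary. */
	atomic_store(&fallback_count, LONG_MAX);
	fallback_inc();
	printf("counter after wrap: %ld\n", atomic_load(&fallback_count));
	return 0;
}

Since the counter is purely a statistic, losing the accumulated count on
wrap-around is acceptable; the reset just keeps the value from being
reported as a confusing negative number.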