From 862e230817f796b7c7cf47d19a2a4c24f81b60a6 Mon Sep 17 00:00:00 2001
From: zhongjiang
Date: Thu, 21 Feb 2019 09:56:18 +0800
Subject: [PATCH] pagecache: add Kconfig to enable/disable the feature

euler inclusion
category: bugfix
CVE: NA
Bugzilla: 9580

---------------------------

Add a Kconfig option, SHRINK_PAGECACHE, so the page cache shrinking
feature can be enabled or disabled at build time.

Signed-off-by: zhongjiang
Reviewed-by: Jing Xiangfeng
Signed-off-by: Yang Yingliang
---
 include/linux/pagemap.h | 20 ++++++++++++++++++++
 include/linux/swap.h    |  2 ++
 kernel/sysctl.c         |  2 ++
 mm/Kconfig              | 13 +++++++++++++
 mm/filemap.c            |  2 ++
 mm/page_alloc.c         |  2 ++
 mm/vmscan.c             | 15 ++++++++++++++-
 7 files changed, 55 insertions(+), 1 deletion(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 65245ce3557f..520627fc19ee 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -613,8 +613,10 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
 	return 0;
 }
 
+#ifdef CONFIG_SHRINK_PAGECACHE
 int add_to_page_cache(struct page *page, struct address_space *mapping,
 		pgoff_t index, gfp_t gfp_mask);
+#endif
 int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 		pgoff_t index, gfp_t gfp_mask);
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
@@ -624,6 +626,24 @@ extern void __delete_from_page_cache(struct page *page, void *shadow);
 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
 void delete_from_page_cache_batch(struct address_space *mapping,
 				  struct pagevec *pvec);
+#ifndef CONFIG_SHRINK_PAGECACHE
+/*
+ * Like add_to_page_cache_locked, but used to add newly allocated pages:
+ * the page is new, so we can just run __SetPageLocked() against it.
+ */
+static inline int add_to_page_cache(struct page *page,
+		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
+{
+	int error;
+
+	__SetPageLocked(page);
+	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
+	if (unlikely(error))
+		__ClearPageLocked(page);
+
+	return error;
+}
+#endif
 
 static inline unsigned long dir_pages(struct inode *inode)
 {
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 86fd41761b96..75ad94cabf8c 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -367,6 +367,7 @@ extern int vm_swappiness;
 extern int remove_mapping(struct address_space *mapping, struct page *page);
 extern unsigned long vm_total_pages;
 
+#ifdef CONFIG_SHRINK_PAGECACHE
 extern unsigned long vm_cache_limit_ratio;
 extern unsigned long vm_cache_limit_ratio_min;
 extern unsigned long vm_cache_limit_ratio_max;
@@ -385,6 +386,7 @@ extern int cache_limit_ratio_sysctl_handler(struct ctl_table *table, int write,
 	void __user *buffer, size_t *length, loff_t *ppos);
 extern int cache_limit_mbytes_sysctl_handler(struct ctl_table *table, int write,
 	void __user *buffer, size_t *length, loff_t *ppos);
+#endif
 
 #ifdef CONFIG_NUMA
 extern int node_reclaim_mode;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 739da03342d2..dfef03879627 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1364,6 +1364,7 @@ static struct ctl_table vm_table[] = {
 		.extra1		= &zero,
 		.extra2		= &one_hundred,
 	},
+#ifdef CONFIG_SHRINK_PAGECACHE
 	{
 		.procname	= "cache_reclaim_s",
 		.data		= &vm_cache_reclaim_s,
@@ -1382,6 +1383,7 @@ static struct ctl_table vm_table[] = {
 		.extra1		= &vm_cache_reclaim_weight_min,
 		.extra2		= &vm_cache_reclaim_weight_max,
 	},
+#endif
 #ifdef CONFIG_HUGETLB_PAGE
 	{
 		.procname	= "nr_hugepages",
diff --git a/mm/Kconfig b/mm/Kconfig
index de64ea658716..aba6e953d397 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -479,6 +479,19 @@ config FRONTSWAP
 
 	  If unsure, say Y to enable frontswap.
 
+config SHRINK_PAGECACHE
+	bool "Enable background thread to shrink the page cache"
+	depends on MMU
+	default n
+	help
+	  Enable a background thread that periodically reclaims page cache
+	  so that as much memory as possible stays free. A large page cache
+	  normally improves performance, so enabling this option trades some
+	  performance for a smaller page cache footprint. Leave this disabled
+	  unless you really need to limit the amount of page cache.
+
+	  If unsure, say N.
+
 config CMA
 	bool "Contiguous Memory Allocator"
 	depends on HAVE_MEMBLOCK && MMU
diff --git a/mm/filemap.c b/mm/filemap.c
index ac4b66869cca..4585b4e0b912 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -895,6 +895,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 }
 EXPORT_SYMBOL(add_to_page_cache_locked);
 
+#ifdef CONFIG_SHRINK_PAGECACHE
 /*
  * Like add_to_page_cache_locked, but used to add newly allocated pages:
  * the page is new, so we can just run __SetPageLocked() against it.
@@ -914,6 +915,7 @@ int add_to_page_cache(struct page *page,
 	return error;
 }
 EXPORT_SYMBOL(add_to_page_cache);
+#endif
 
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 				pgoff_t offset, gfp_t gfp_mask)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 19bf37971989..dffc6ad1f7d8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -8150,6 +8150,7 @@ bool set_hwpoison_free_buddy_page(struct page *page)
 }
 #endif
 
+#ifdef CONFIG_SHRINK_PAGECACHE
 unsigned long page_cache_over_limit(void)
 {
 	unsigned long lru_file, limit;
@@ -8217,3 +8218,4 @@ int cache_limit_mbytes_sysctl_handler(struct ctl_table *table, int write,
 
 	return 0;
 }
+#endif
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4a468f8619ba..200aa5d1ee5a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -166,6 +166,7 @@ int vm_swappiness = 60;
  */
 unsigned long vm_total_pages;
 
+#ifdef CONFIG_SHRINK_PAGECACHE
 unsigned long vm_cache_limit_ratio;
 unsigned long vm_cache_limit_ratio_min;
 unsigned long vm_cache_limit_ratio_max;
@@ -179,6 +180,7 @@ int vm_cache_reclaim_weight __read_mostly;
 int vm_cache_reclaim_weight_min;
 int vm_cache_reclaim_weight_max;
 static DEFINE_PER_CPU(struct delayed_work, vmscan_work);
+#endif
 
 static LIST_HEAD(shrinker_list);
 static DECLARE_RWSEM(shrinker_rwsem);
@@ -3517,8 +3519,10 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 
 	count_vm_event(PAGEOUTRUN);
 
+#ifdef CONFIG_SHRINK_PAGECACHE
 	if (vm_cache_limit_mbytes && page_cache_over_limit())
 		shrink_page_cache(GFP_KERNEL);
+#endif
 
 	do {
 		unsigned long nr_reclaimed = sc.nr_reclaimed;
@@ -3902,6 +3906,7 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
 }
 #endif /* CONFIG_HIBERNATION */
 
+#ifdef CONFIG_SHRINK_PAGECACHE
 static unsigned long __shrink_page_cache(gfp_t mask)
 {
 	struct scan_control sc = {
@@ -4013,6 +4018,7 @@ static int kswapd_cpu_down_prep(unsigned int cpu)
 	return 0;
 }
+#endif
 
 /*
  * It's optimal to keep kswapds on the same CPUs as their memory, but
 * not required for correctness.  So if the last cpu in a node goes
@@ -4079,12 +4085,19 @@ static int __init kswapd_init(void)
 
 	swap_setup();
 	for_each_node_state(nid, N_MEMORY)
 		kswapd_run(nid);
+#ifdef CONFIG_SHRINK_PAGECACHE
 	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
 					"mm/vmscan:online", kswapd_cpu_online,
 					kswapd_cpu_down_prep);
+#else
+	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+					"mm/vmscan:online", kswapd_cpu_online,
+					NULL);
+#endif
 	WARN_ON(ret < 0);
+#ifdef CONFIG_SHRINK_PAGECACHE
 	shrink_page_cache_init();
-
+#endif
 	return 0;
 }
-- 
GitLab
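
Usage note (not part of the patch, a hedged sketch): with CONFIG_SHRINK_PAGECACHE=y,
the vm_table entries added in kernel/sysctl.c are expected to surface as files under
/proc/sys/vm/. The small user-space program below writes an example value to the
cache_reclaim_s knob; the path is derived from the procname in the patch, and the
value 120 is purely illustrative (presumably a reclaim interval in seconds, subject
to the limits enforced by the sysctl handler). Run it as root.

#include <stdio.h>

int main(void)
{
	const char *knob = "/proc/sys/vm/cache_reclaim_s";
	FILE *f = fopen(knob, "w");

	if (!f) {
		/* Likely a kernel built without CONFIG_SHRINK_PAGECACHE. */
		perror(knob);
		return 1;
	}
	fprintf(f, "120\n");	/* illustrative value only */
	fclose(f);
	return 0;
}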