From 354b44e803a4f39b591fd42795174de18bda1288 Mon Sep 17 00:00:00 2001
From: zhongjiang
Date: Thu, 21 Feb 2019 21:21:38 +0800
Subject: [PATCH] pagecache: add switch to disable the feature completely

euler inclusion
category: bugfix
CVE: NA
Bugzilla: 9580

---------------------------

This patch controls enabling and disabling of the feature via
/proc/sys/vm/cache_reclaim_enable, so that all background reclaim
threads can be stopped completely on user demand.

Signed-off-by: zhongjiang
Reviewed-by: Jing Xiangfeng
Signed-off-by: Yang Yingliang
---
 include/linux/swap.h |  3 +++
 kernel/sysctl.c      |  9 +++++++++
 mm/vmscan.c          | 43 +++++++++++++++++++++++++++++++++++--------
 3 files changed, 47 insertions(+), 8 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index 75ad94cabf8c..2c3e35eb32c2 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -380,12 +380,15 @@ extern int vm_cache_reclaim_s_max;
 extern int vm_cache_reclaim_weight;
 extern int vm_cache_reclaim_weight_min;
 extern int vm_cache_reclaim_weight_max;
+extern int vm_cache_reclaim_enable;
 extern unsigned long page_cache_over_limit(void);
 extern unsigned long shrink_page_cache(gfp_t mask);
 extern int cache_limit_ratio_sysctl_handler(struct ctl_table *table, int write,
 		void __user *buffer, size_t *length, loff_t *ppos);
 extern int cache_limit_mbytes_sysctl_handler(struct ctl_table *table, int write,
 		void __user *buffer, size_t *length, loff_t *ppos);
+extern int cache_reclaim_enable_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *length, loff_t *ppos);
 #endif
 
 #ifdef CONFIG_NUMA
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index dfef03879627..201cb993869b 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1383,6 +1383,15 @@ static struct ctl_table vm_table[] = {
 		.extra1		= &vm_cache_reclaim_weight_min,
 		.extra2		= &vm_cache_reclaim_weight_max,
 	},
+	{
+		.procname	= "cache_reclaim_enable",
+		.data		= &vm_cache_reclaim_enable,
+		.maxlen		= sizeof(vm_cache_reclaim_enable),
+		.mode		= 0644,
+		.proc_handler	= cache_reclaim_enable_handler,
+		.extra1		= &zero,
+		.extra2		= &one,
+	},
 #endif
 #ifdef CONFIG_HUGETLB_PAGE
 	{
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 200aa5d1ee5a..316a9745bb38 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -179,6 +179,7 @@ int vm_cache_reclaim_s_max;
 int vm_cache_reclaim_weight __read_mostly;
 int vm_cache_reclaim_weight_min;
 int vm_cache_reclaim_weight_max;
+int vm_cache_reclaim_enable;
 static DEFINE_PER_CPU(struct delayed_work, vmscan_work);
 #endif
 
@@ -3942,14 +3943,22 @@ static void shrink_shepherd(struct work_struct *w)
 	for_each_online_cpu(cpu) {
 		struct delayed_work *work = &per_cpu(vmscan_work, cpu);
 
-		if (!delayed_work_pending(work))
+		if (!delayed_work_pending(work) && vm_cache_reclaim_enable)
 			queue_delayed_work_on(cpu, system_wq, work, 0);
 	}
 	put_online_cpus();
 
-	schedule_delayed_work(&shepherd,
-		round_jiffies_relative((unsigned long)vm_cache_reclaim_s * HZ));
+	/* we want all kernel threads to stop */
+	if (vm_cache_reclaim_enable) {
+		if (vm_cache_reclaim_s == 0)
+			schedule_delayed_work(&shepherd,
+				round_jiffies_relative(120 * HZ));
+		else
+			schedule_delayed_work(&shepherd,
+				round_jiffies_relative((unsigned long)
+				vm_cache_reclaim_s * HZ));
+	}
 }
 
 static void shrink_shepherd_timer(void)
@@ -3981,15 +3990,16 @@ static void shrink_page_cache_work(struct work_struct *w)
 	struct delayed_work *work = to_delayed_work(w);
 	unsigned long nr_pages;
 
-	if (vm_cache_reclaim_s == 0) {
-		queue_delayed_work_on(smp_processor_id(), system_wq,
-			work, round_jiffies_relative(120 * HZ));
+	/*
+	 * If vm_cache_reclaim_enable or vm_cache_reclaim_s is zero,
+	 * do not shrink the page cache again.
+	 */
+	if (vm_cache_reclaim_s == 0 || !vm_cache_reclaim_enable)
 		return;
-	}
 
 	/* It should wait more time if we hardly reclaim the page cache */
 	nr_pages = shrink_page_cache(GFP_KERNEL);
-	if (nr_pages < SWAP_CLUSTER_MAX)
+	if ((nr_pages < SWAP_CLUSTER_MAX) && vm_cache_reclaim_enable)
 		queue_delayed_work_on(smp_processor_id(), system_wq,
 			work, round_jiffies_relative(120 * HZ));
 }
@@ -4008,6 +4018,7 @@ static void shrink_page_cache_init(void)
 	vm_cache_reclaim_weight = 1;
 	vm_cache_reclaim_weight_min = 1;
 	vm_cache_reclaim_weight_max = 100;
+	vm_cache_reclaim_enable = 1;
 
 	shrink_shepherd_timer();
 }
@@ -4018,6 +4029,22 @@ static int kswapd_cpu_down_prep(unsigned int cpu)
 	return 0;
 }
 
+int cache_reclaim_enable_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *length, loff_t *ppos)
+{
+	int ret;
+
+	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
+	if (ret)
+		return ret;
+
+	if (write)
+		schedule_delayed_work(&shepherd, round_jiffies_relative(
+			(unsigned long)vm_cache_reclaim_s * HZ));
+
+	return 0;
+}
 #endif
 
 /* It's optimal to keep kswapds on the same CPUs as their memory, but
--
GitLab
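For completeness, below is a minimal userspace sketch of how the new switch
could be exercised once this patch is applied. It assumes a kernel built with
the page cache limit feature so that /proc/sys/vm/cache_reclaim_enable exists;
the file name toggle_cache_reclaim.c and the read-back step are illustrative
and not part of the patch itself.

/*
 * toggle_cache_reclaim.c - illustrative only, not part of the patch.
 * Writes 0 or 1 to the sysctl added above and reads the value back.
 * Needs root and a kernel carrying this patch.
 */
#include <stdio.h>
#include <stdlib.h>

#define CTL_PATH "/proc/sys/vm/cache_reclaim_enable"

int main(int argc, char *argv[])
{
	/* default to 0, i.e. stop all background reclaim threads */
	int val = (argc > 1) ? atoi(argv[1]) : 0;
	char buf[16] = "";
	FILE *f;

	f = fopen(CTL_PATH, "w");
	if (!f) {
		perror(CTL_PATH);
		return EXIT_FAILURE;
	}
	/*
	 * Only 0 and 1 are accepted: the ctl_table entry bounds the
	 * value with extra1 = &zero and extra2 = &one, enforced by
	 * proc_dointvec_minmax() in cache_reclaim_enable_handler().
	 */
	fprintf(f, "%d\n", val);
	fclose(f);

	/* read back the current state */
	f = fopen(CTL_PATH, "r");
	if (f && fgets(buf, sizeof(buf), f))
		printf("cache_reclaim_enable = %s", buf);
	if (f)
		fclose(f);
	return EXIT_SUCCESS;
}

Note the resulting behaviour: any write re-arms the shepherd work through
cache_reclaim_enable_handler(). If the value written is 0, the next shepherd
run sees vm_cache_reclaim_enable cleared, skips queueing the per-CPU work,
and does not reschedule itself, so the background reclaim threads wind down;
writing 1 restarts the whole chain.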