Commit 862e2308, authored by zhongjiang, committed by Xie XiuQi

pagecache: add Kconfig to enable/disable the feature

euler inclusion
category: bugfix
CVE: NA
Bugzilla: 9580

---------------------------

Add a Kconfig option (CONFIG_SHRINK_PAGECACHE) so the feature can be
enabled or disabled at build time.
Signed-off-by: zhongjiang <zhongjiang@huawei.com>
Reviewed-by: Jing Xiangfeng <jingxiangfeng@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 4e3f738c
@@ -613,8 +613,10 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
         return 0;
 }
 
+#ifdef CONFIG_SHRINK_PAGECACHE
 int add_to_page_cache(struct page *page, struct address_space *mapping,
                 pgoff_t index, gfp_t gfp_mask);
+#endif
 int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                 pgoff_t index, gfp_t gfp_mask);
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
@@ -624,6 +626,24 @@ extern void __delete_from_page_cache(struct page *page, void *shadow);
 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
 void delete_from_page_cache_batch(struct address_space *mapping,
                 struct pagevec *pvec);
+
+#ifndef CONFIG_SHRINK_PAGECACHE
+/*
+ * Like add_to_page_cache_locked, but used to add newly allocated pages:
+ * the page is new, so we can just run __SetPageLocked() against it.
+ */
+static inline int add_to_page_cache(struct page *page,
+                struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
+{
+        int error;
+
+        __SetPageLocked(page);
+        error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
+        if (unlikely(error))
+                __ClearPageLocked(page);
+        return error;
+}
+#endif
 
 static inline unsigned long dir_pages(struct inode *inode)
 {
...
@@ -367,6 +367,7 @@ extern int vm_swappiness;
 extern int remove_mapping(struct address_space *mapping, struct page *page);
 extern unsigned long vm_total_pages;
 
+#ifdef CONFIG_SHRINK_PAGECACHE
 extern unsigned long vm_cache_limit_ratio;
 extern unsigned long vm_cache_limit_ratio_min;
 extern unsigned long vm_cache_limit_ratio_max;
@@ -385,6 +386,7 @@ extern int cache_limit_ratio_sysctl_handler(struct ctl_table *table, int write,
                 void __user *buffer, size_t *length, loff_t *ppos);
 extern int cache_limit_mbytes_sysctl_handler(struct ctl_table *table, int write,
                 void __user *buffer, size_t *length, loff_t *ppos);
+#endif
 
 #ifdef CONFIG_NUMA
 extern int node_reclaim_mode;
...
@@ -1364,6 +1364,7 @@ static struct ctl_table vm_table[] = {
                 .extra1         = &zero,
                 .extra2         = &one_hundred,
         },
+#ifdef CONFIG_SHRINK_PAGECACHE
         {
                 .procname       = "cache_reclaim_s",
                 .data           = &vm_cache_reclaim_s,
@@ -1382,6 +1383,7 @@ static struct ctl_table vm_table[] = {
                 .extra1         = &vm_cache_reclaim_weight_min,
                 .extra2         = &vm_cache_reclaim_weight_max,
         },
+#endif
 #ifdef CONFIG_HUGETLB_PAGE
         {
                 .procname       = "nr_hugepages",
...
@@ -479,6 +479,19 @@ config FRONTSWAP
           If unsure, say Y to enable frontswap.
 
+config SHRINK_PAGECACHE
+        bool "Enable background thread to shrink the page cache"
+        depends on MMU
+        default n
+        help
+          SHRINK_PAGECACHE limits the amount of page cache kept in the
+          system. A large page cache can greatly improve performance, but
+          some users prefer to keep as much memory free as possible, at
+          the cost of that performance. Leave this option off unless you
+          need that behaviour.
+
+          If unsure, say N.
+
 config CMA
         bool "Contiguous Memory Allocator"
         depends on HAVE_MEMBLOCK && MMU
...
@@ -895,6 +895,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 }
 EXPORT_SYMBOL(add_to_page_cache_locked);
 
+#ifdef CONFIG_SHRINK_PAGECACHE
 /*
  * Like add_to_page_cache_locked, but used to add newly allocated pages:
  * the page is new, so we can just run __SetPageLocked() against it.
@@ -914,6 +915,7 @@ int add_to_page_cache(struct page *page,
         return error;
 }
 EXPORT_SYMBOL(add_to_page_cache);
+#endif
 
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                 pgoff_t offset, gfp_t gfp_mask)
...
@@ -8150,6 +8150,7 @@ bool set_hwpoison_free_buddy_page(struct page *page)
 }
 #endif
 
+#ifdef CONFIG_SHRINK_PAGECACHE
 unsigned long page_cache_over_limit(void)
 {
         unsigned long lru_file, limit;
@@ -8217,3 +8218,4 @@ int cache_limit_mbytes_sysctl_handler(struct ctl_table *table, int write,
 
         return 0;
 }
+#endif
@@ -166,6 +166,7 @@ int vm_swappiness = 60;
  */
 unsigned long vm_total_pages;
 
+#ifdef CONFIG_SHRINK_PAGECACHE
 unsigned long vm_cache_limit_ratio;
 unsigned long vm_cache_limit_ratio_min;
 unsigned long vm_cache_limit_ratio_max;
@@ -179,6 +180,7 @@ int vm_cache_reclaim_weight __read_mostly;
 int vm_cache_reclaim_weight_min;
 int vm_cache_reclaim_weight_max;
 static DEFINE_PER_CPU(struct delayed_work, vmscan_work);
+#endif
 
 static LIST_HEAD(shrinker_list);
 static DECLARE_RWSEM(shrinker_rwsem);
@@ -3517,8 +3519,10 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 
         count_vm_event(PAGEOUTRUN);
 
+#ifdef CONFIG_SHRINK_PAGECACHE
         if (vm_cache_limit_mbytes && page_cache_over_limit())
                 shrink_page_cache(GFP_KERNEL);
+#endif
 
         do {
                 unsigned long nr_reclaimed = sc.nr_reclaimed;
@@ -3902,6 +3906,7 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
 }
 #endif /* CONFIG_HIBERNATION */
 
+#ifdef CONFIG_SHRINK_PAGECACHE
 static unsigned long __shrink_page_cache(gfp_t mask)
 {
         struct scan_control sc = {
@@ -4013,6 +4018,7 @@ static int kswapd_cpu_down_prep(unsigned int cpu)
 
         return 0;
 }
+#endif
 
 /* It's optimal to keep kswapds on the same CPUs as their memory, but
    not required for correctness. So if the last cpu in a node goes
@@ -4079,12 +4085,19 @@ static int __init kswapd_init(void)
         swap_setup();
         for_each_node_state(nid, N_MEMORY)
                 kswapd_run(nid);
+#ifdef CONFIG_SHRINK_PAGECACHE
         ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
                                         "mm/vmscan:online", kswapd_cpu_online,
                                         kswapd_cpu_down_prep);
+#else
+        ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+                                        "mm/vmscan:online", kswapd_cpu_online,
+                                        NULL);
+#endif
         WARN_ON(ret < 0);
+#ifdef CONFIG_SHRINK_PAGECACHE
         shrink_page_cache_init();
+#endif
         return 0;
 }
...
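Usage note (not part of the patch): once CONFIG_SHRINK_PAGECACHE is selected in the kernel configuration, the feature is tuned through the vm sysctls whose handlers appear in the hunks above. The sketch below is a hedged example only; "cache_reclaim_s" appears verbatim in the vm_table hunk, while "cache_limit_mbytes" is inferred from cache_limit_mbytes_sysctl_handler, so the exact names, units, and defaults may differ in a given build.

    # build-time: enable the option (e.g. in .config)
    CONFIG_SHRINK_PAGECACHE=y

    # run-time: cap the page cache at roughly 2 GiB
    # (name inferred from cache_limit_mbytes_sysctl_handler above)
    sysctl -w vm.cache_limit_mbytes=2048

    # run-time: run the per-CPU background reclaim every 30 seconds
    # ("cache_reclaim_s" is shown verbatim in the sysctl table above)
    sysctl -w vm.cache_reclaim_s=30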