diff --git a/include/linux/mem_reliable.h b/include/linux/mem_reliable.h
index 2d017de08a679c3fb6b63e14f06c61d3366e88d0..2d3577ce71134433dc0ea3a4bb2ae7026a7448ec 100644
--- a/include/linux/mem_reliable.h
+++ b/include/linux/mem_reliable.h
@@ -41,6 +41,7 @@ extern bool mem_reliable_status(void);
 extern void page_cache_reliable_lru_add(enum lru_list lru,
 					struct page *page,
 					int val);
+extern void page_cache_prepare_alloc(gfp_t *gfp);
 
 static inline bool mem_reliable_is_enabled(void)
 {
@@ -175,6 +176,7 @@ static inline bool mem_reliable_shmem_limit_check(void) { return true; }
 static inline void page_cache_reliable_lru_add(enum lru_list lru,
 					       struct page *page,
 					       int val) {}
+static inline void page_cache_prepare_alloc(gfp_t *gfp) {}
 #endif
 
 #endif
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index a7d83fed0601d85e9f7cfe1e71d2e52168c832c3..085aed892ce5812b774046cc601f09caf5915c13 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -249,15 +249,7 @@ extern struct page *__page_cache_alloc(gfp_t gfp);
 #else
 static inline struct page *__page_cache_alloc(gfp_t gfp)
 {
-	struct page *page;
-
-	if (pagecache_reliable_is_enabled())
-		gfp |= ___GFP_RELIABILITY;
-
-	page = alloc_pages(gfp, 0);
-	page_cache_fallback_inc(gfp, page);
-
-	return page;
+	return alloc_pages(gfp, 0);
 }
 #endif
 
diff --git a/mm/filemap.c b/mm/filemap.c
index 2827e2b670e02d646d73fc43dc5c303dbf000702..2ac6ddf630d804e00798c1ec41ce27d0bcbcd9f4 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1040,10 +1040,7 @@ struct page *__page_cache_alloc(gfp_t gfp)
 	int n;
 	struct page *page;
 
-	if (pagecache_reliable_is_enabled())
-		gfp |= ___GFP_RELIABILITY;
-	else
-		WARN_ON_ONCE(gfp & ___GFP_RELIABILITY);
+	page_cache_prepare_alloc(&gfp);
 
 	if (cpuset_do_page_mem_spread()) {
 		unsigned int cpuset_mems_cookie;
diff --git a/mm/mem_reliable.c b/mm/mem_reliable.c
index 17776f387031dd034e1474ed6dd2912f7350def9..dcdd937148b60f6f6cd1ea64897680310461478b 100644
--- a/mm/mem_reliable.c
+++ b/mm/mem_reliable.c
@@ -37,6 +37,10 @@ long shmem_reliable_nr_page = LONG_MAX;
 bool pagecache_use_reliable_mem __read_mostly = true;
 atomic_long_t page_cache_fallback = ATOMIC_LONG_INIT(0);
 DEFINE_PER_CPU(long, pagecache_reliable_pages);
+
+static unsigned long zero;
+static unsigned long reliable_pagecache_max_bytes = ULONG_MAX;
+
 bool mem_reliable_status(void)
 {
 	return mem_reliable_is_enabled();
@@ -394,6 +398,23 @@ int reliable_shmem_bytes_limit_handler(struct ctl_table *table, int write,
 }
 #endif
 
+int reliable_pagecache_max_bytes_write(struct ctl_table *table, int write,
+		void __user *buffer, size_t *length, loff_t *ppos)
+{
+	unsigned long old_value = reliable_pagecache_max_bytes;
+	int ret;
+
+	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
+	if (!ret && write) {
+		if (reliable_pagecache_max_bytes > total_reliable_mem_sz()) {
+			reliable_pagecache_max_bytes = old_value;
+			return -EINVAL;
+		}
+	}
+
+	return ret;
+}
+
 static struct ctl_table reliable_ctl_table[] = {
 	{
 		.procname = "task_reliable_limit",
@@ -425,6 +446,14 @@ static struct ctl_table reliable_ctl_table[] = {
 		.proc_handler = reliable_shmem_bytes_limit_handler,
 	},
 #endif
+	{
+		.procname = "reliable_pagecache_max_bytes",
+		.data = &reliable_pagecache_max_bytes,
+		.maxlen = sizeof(reliable_pagecache_max_bytes),
+		.mode = 0644,
+		.proc_handler = reliable_pagecache_max_bytes_write,
+		.extra1 = &zero,
+	},
 	{}
 };
 
@@ -438,6 +467,30 @@ static struct ctl_table reliable_dir_table[] = {
 	{}
 };
 
+void page_cache_prepare_alloc(gfp_t *gfp)
+{
+	long nr_reliable = 0;
+	int cpu;
+
+	if (!mem_reliable_is_enabled())
+		return;
+
+	for_each_possible_cpu(cpu)
+		nr_reliable += per_cpu(pagecache_reliable_pages, cpu);
+
+	if (nr_reliable < 0)
+		goto no_reliable;
+
+	if (nr_reliable > reliable_pagecache_max_bytes >> PAGE_SHIFT)
+		goto no_reliable;
+
+	*gfp |= ___GFP_RELIABILITY;
+	return;
+
+no_reliable:
+	*gfp &= ~___GFP_RELIABILITY;
+}
+
 static int __init reliable_sysctl_init(void)
 {
 	if (!mem_reliable_is_enabled())
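
Note on the calling convention this patch establishes (it mirrors the
mm/filemap.c hunk above): callers keep taking the gfp_t by value and let
page_cache_prepare_alloc() set or clear ___GFP_RELIABILITY before the
actual allocation. A minimal sketch of such a caller follows; it is
illustrative only, and example_page_cache_alloc() is a hypothetical name,
not part of this patch:

	static struct page *example_page_cache_alloc(gfp_t gfp)
	{
		/*
		 * Sets ___GFP_RELIABILITY while the summed per-CPU
		 * reliable page cache count stays at or below
		 * reliable_pagecache_max_bytes (in pages), clears it once
		 * the limit is exceeded; when memory reliability is
		 * disabled the flag is left untouched.
		 */
		page_cache_prepare_alloc(&gfp);

		return alloc_pages(gfp, 0);
	}

Writes to the new reliable_pagecache_max_bytes sysctl that exceed
total_reliable_mem_sz() are rejected: the handler restores the previous
value and returns -EINVAL.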