diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 8b6f7071072b7d9a7387902c36cb8b871b32bb91..a46b2fe191ba70710832ceeeb961cbedbeff407e 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1973,7 +1973,7 @@
 			some extension. These two features are alternatives.
 
 	reliable_debug=	[ARM64]
-			Format: [F][,S]
+			Format: [F][,S][,P]
 			Only works with CONFIG_MEMORY_RELIABLE and
 			"kernelcore=reliable" is configured.
 			F: User tasks with PF_RELIABLE will not allocate
@@ -1981,6 +1981,7 @@ from mirrored region failed.
 
 			Pagecache and tmpfs will follow this rule too.
 			S: The shmem does not use the reliable memory.
+			P: Page cache does not use the reliable memory.
 
 	kgdbdbgp=	[KGDB,HW] kgdb over EHCI usb debug port.
 			Format: <Controller#>[,poll interval]
diff --git a/include/linux/mem_reliable.h b/include/linux/mem_reliable.h
index 4b51dfc513fc4255316a0b2e686cb95ac6e0b209..0c5f80428e9738bd4998eee48cd133a7b993a3db 100644
--- a/include/linux/mem_reliable.h
+++ b/include/linux/mem_reliable.h
@@ -20,6 +20,7 @@ extern unsigned long task_reliable_limit __read_mostly;
 extern bool reliable_allow_fallback;
 extern bool shmem_reliable;
 extern struct percpu_counter reliable_shmem_used_nr_page;
+extern bool pagecache_use_reliable_mem;
 
 extern void add_reliable_mem_size(long sz);
 extern void mem_reliable_init(bool has_unmirrored_mem,
@@ -85,6 +86,11 @@ static inline bool shmem_reliable_is_enabled(void)
 	return shmem_reliable;
 }
 
+static inline bool pagecache_reliable_is_enabled(void)
+{
+	return pagecache_use_reliable_mem;
+}
+
 static inline void shmem_reliable_page_counter(struct page *page, int nr_page)
 {
 	if (shmem_reliable_is_enabled() && page_reliable(page))
@@ -94,6 +100,7 @@ static inline void shmem_reliable_page_counter(struct page *page, int nr_page)
 #else
 #define reliable_enabled 0
 #define reliable_allow_fb_enabled() false
+#define pagecache_use_reliable_mem 0
 
 static inline bool mem_reliable_is_enabled(void) { return false; }
 static inline void add_reliable_mem_size(long sz) {}
@@ -126,6 +133,7 @@ static inline void shmem_reliable_page_counter(struct page *page,
 					       int nr_page)
 {
 }
+static inline bool pagecache_reliable_is_enabled(void) { return false; }
 
 #endif
 #endif
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index a6457acd7462efdd645bd6088edf5642d66226d4..77563c03618c917be3b88142e690ee08e07703c8 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -249,7 +249,9 @@ extern struct page *__page_cache_alloc(gfp_t gfp);
 #else
 static inline struct page *__page_cache_alloc(gfp_t gfp)
 {
-	gfp |= ___GFP_RELIABILITY;
+	if (pagecache_reliable_is_enabled())
+		gfp |= ___GFP_RELIABILITY;
+
 	return alloc_pages(gfp, 0);
 }
 #endif
diff --git a/mm/filemap.c b/mm/filemap.c
index c30e5c1eb77c2eb8bcbd9de5875b11653457daa7..4dc3cc5834a55b452d9da9eb2bffc9844626d131 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1042,7 +1042,11 @@ struct page *__page_cache_alloc(gfp_t gfp)
 	int n;
 	struct page *page;
 
-	gfp |= ___GFP_RELIABILITY;
+	if (pagecache_reliable_is_enabled())
+		gfp |= ___GFP_RELIABILITY;
+	else
+		WARN_ON_ONCE(gfp & ___GFP_RELIABILITY);
+
 	if (cpuset_do_page_mem_spread()) {
 		unsigned int cpuset_mems_cookie;
 		do {
diff --git a/mm/mem_reliable.c b/mm/mem_reliable.c
index 89164bc5728b9c385c46b67e731e74cd9bd83d32..5a32977b674fde365e318b72ed4fc2f415338f27 100644
--- a/mm/mem_reliable.c
+++ b/mm/mem_reliable.c
@@ -21,6 +21,7 @@
 bool reliable_allow_fallback __read_mostly = true;
 bool shmem_reliable __read_mostly = true;
 struct percpu_counter reliable_shmem_used_nr_page __read_mostly;
+bool pagecache_use_reliable_mem __read_mostly = true;
 void add_reliable_mem_size(long sz)
 {
 	atomic_long_add(sz, &total_reliable_mem);
@@ -249,6 +250,10 @@ static int __init setup_reliable_debug(char *str)
 			shmem_reliable = false;
 			pr_info("shmem reliable disabled.");
 			break;
+		case 'P':
+			pagecache_use_reliable_mem = false;
+			pr_info("disable page cache use reliable memory\n");
+			break;
 		default:
 			pr_err("reliable_debug option '%c' unknown. skipped\n",
 			       *str);