diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 3fc729aab31a6335ebbb5d805582958da4381e30..8b6f7071072b7d9a7387902c36cb8b871b32bb91 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1973,13 +1973,14 @@
 			some extension.  These two features are alternatives.
 
 	reliable_debug=	[ARM64]
-			Format: [F]
+			Format: [F][,S]
 			Only works with CONFIG_MEMORY_RELIABLE and
 			"kernelcore=reliable" is configured.
 			F: User tasks with PF_RELIABLE will not allocate
 			memory from non-mirrored region if this allocation
 			from mirrored region failed.
 			Pagecache and tmpfs will follow this rule too.
+			S: Shmem does not use reliable memory.
 
 	kgdbdbgp=	[KGDB,HW] kgdb over EHCI usb debug port.
 			Format: <Controller#>[,poll interval]
diff --git a/include/linux/mem_reliable.h b/include/linux/mem_reliable.h
index c9c4d94a4df46a40634e1412fd2d5fd15fd1278f..0641c7a88c78623cddd1d4f6af68c148391ced3c 100644
--- a/include/linux/mem_reliable.h
+++ b/include/linux/mem_reliable.h
@@ -17,10 +17,12 @@ extern bool reliable_enabled;
 extern atomic_long_t reliable_user_used_nr_page;
 extern unsigned long task_reliable_limit __read_mostly;
 extern bool reliable_allow_fallback;
+extern bool shmem_reliable;
 
 extern void add_reliable_mem_size(long sz);
 extern void mem_reliable_init(bool has_unmirrored_mem,
 			      unsigned long *zone_movable_pfn);
+extern void shmem_reliable_init(void);
 extern void reliable_report_meminfo(struct seq_file *m);
 extern bool page_reliable(struct page *page);
 extern void reliable_report_usage(struct seq_file *m, struct mm_struct *mm);
@@ -75,6 +77,12 @@ static inline bool reliable_allow_fb_enabled(void)
 {
 	return reliable_allow_fallback;
 }
+
+static inline bool shmem_reliable_is_enabled(void)
+{
+	return shmem_reliable;
+}
+
 #else
 #define reliable_enabled 0
 #define reliable_allow_fb_enabled() false
@@ -83,6 +91,7 @@ static inline bool mem_reliable_is_enabled(void) { return false; }
 static inline void add_reliable_mem_size(long sz) {}
 static inline void mem_reliable_init(bool has_unmirrored_mem,
 				     unsigned long *zone_movable_pfn) {}
+static inline void shmem_reliable_init(void) {}
 static inline bool zone_reliable(struct zone *zone) { return false; }
 static inline bool skip_none_movable_zone(gfp_t gfp, struct zoneref *z)
 {
@@ -104,6 +113,7 @@ static inline void mem_reliable_out_of_memory(gfp_t gfp_mask,
 					      unsigned int order,
 					      int preferred_nid,
 					      nodemask_t *nodemask) {}
+static inline bool shmem_reliable_is_enabled(void) { return false; }
 
 #endif
 
diff --git a/mm/mem_reliable.c b/mm/mem_reliable.c
index 60a214e3b28f70241adfccef42aafc5d84765965..32a0270b494d243fa71e28cd527a0be456d16be1 100644
--- a/mm/mem_reliable.c
+++ b/mm/mem_reliable.c
@@ -18,6 +18,7 @@ atomic_long_t reliable_user_used_nr_page;
 /* reliable user limit for user tasks with reliable flag */
 unsigned long task_reliable_limit = ULONG_MAX;
 bool reliable_allow_fallback __read_mostly = true;
+bool shmem_reliable __read_mostly = true;
 
 void add_reliable_mem_size(long sz)
 {
@@ -88,6 +89,17 @@ void mem_reliable_init(bool has_unmirrored_mem, unsigned long *zone_movable_pfn)
 		atomic_long_read(&total_reliable_mem));
 }
 
+void shmem_reliable_init(void)
+{
+	if (!shmem_reliable_is_enabled())
+		return;
+
+	if (!mem_reliable_is_enabled()) {
+		shmem_reliable = false;
+		pr_info("shmem reliable disabled.\n");
+	}
+}
+
 static unsigned long total_reliable_mem_sz(void)
 {
 	return atomic_long_read(&total_reliable_mem);
@@ -223,6 +235,10 @@ static int __init setup_reliable_debug(char *str)
 			reliable_allow_fallback = false;
 			pr_info("fallback disabled.");
 			break;
+		case 'S':
+			shmem_reliable = false;
+			pr_info("shmem reliable disabled.\n");
+			break;
 		default:
 			pr_err("reliable_debug option '%c' unknown. skipped\n",
 			       *str);
diff --git a/mm/shmem.c b/mm/shmem.c
index 16bb7806a25e6e85c14f17555662c2124ac879a2..e27fc90bab412dd0b8c8adf6f6a3bd01f11f0de0 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1593,6 +1593,14 @@ static struct page *shmem_alloc_page(gfp_t gfp,
 	return page;
 }
 
+static inline void shmem_prepare_alloc(gfp_t *gfp_mask)
+{
+	if (!shmem_reliable_is_enabled())
+		return;
+
+	*gfp_mask |= ___GFP_RELIABILITY;
+}
+
 static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
 		struct inode *inode,
 		pgoff_t index, bool huge, int node_id)
@@ -1609,6 +1617,8 @@ static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
 	if (!shmem_inode_acct_block(inode, nr))
 		goto failed;
 
+	shmem_prepare_alloc(&gfp);
+
 	if (huge)
 		page = shmem_alloc_hugepage(gfp, info, index, node_id);
 	else
@@ -3941,6 +3951,8 @@ int __init shmem_init(void)
 	else
 		shmem_huge = 0; /* just in case it was patched */
 #endif
+
+	shmem_reliable_init();
 	return 0;
 
 out1:
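
Note on how the pieces fit together: shmem_reliable defaults to true, shmem_reliable_init() clears it at shmem_init() time if memory reliability itself is off, and shmem_prepare_alloc() then tags each shmem page allocation with ___GFP_RELIABILITY so it is served from the mirrored region. The sketch below is plain userspace C, not kernel code: the bit value chosen for ___GFP_RELIABILITY is invented for the demo (the real one comes from the memory-reliable series), and the two booleans stand in for the kernel state set by "kernelcore=reliable" and "reliable_debug=S".

#include <stdbool.h>
#include <stdio.h>

#define ___GFP_RELIABILITY 0x1000000u	/* hypothetical bit, demo only */

static bool mem_reliable_enabled = true;	/* "kernelcore=reliable" given */
static bool shmem_reliable = true;		/* cleared by "reliable_debug=S" */

/* Models shmem_prepare_alloc() from the patch: tag the gfp mask so the
 * page allocator prefers the mirrored (reliable) region. */
static void shmem_prepare_alloc(unsigned int *gfp_mask)
{
	if (!(mem_reliable_enabled && shmem_reliable))
		return;

	*gfp_mask |= ___GFP_RELIABILITY;
}

int main(void)
{
	unsigned int gfp = 0;

	shmem_prepare_alloc(&gfp);
	printf("default: gfp %s ___GFP_RELIABILITY\n",
	       (gfp & ___GFP_RELIABILITY) ? "includes" : "omits");

	/* Simulate booting with reliable_debug=S: shmem opts out. */
	shmem_reliable = false;
	gfp = 0;
	shmem_prepare_alloc(&gfp);
	printf("reliable_debug=S: gfp %s ___GFP_RELIABILITY\n",
	       (gfp & ___GFP_RELIABILITY) ? "includes" : "omits");

	return 0;
}

Per the documented format, both options can be combined as "reliable_debug=F,S": F stops PF_RELIABLE tasks from falling back to the non-mirrored region, while S keeps shmem allocations out of the mirrored region entirely.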