diff --git a/include/linux/mem_reliable.h b/include/linux/mem_reliable.h
index 4add3803eb069afb7a9e1da5feb67c4d74323a39..8f858d11ce6f512d243419fe4cfbd97607a66532 100644
--- a/include/linux/mem_reliable.h
+++ b/include/linux/mem_reliable.h
@@ -13,10 +13,12 @@ extern struct static_key_false mem_reliable;
 
 extern bool reliable_enabled;
+extern bool shmem_reliable;
 
 extern void add_reliable_mem_size(long sz);
 extern void mem_reliable_init(bool has_unmirrored_mem,
 			      unsigned long *zone_movable_pfn);
+extern void shmem_reliable_init(void);
 extern void reliable_report_meminfo(struct seq_file *m);
 
 static inline bool mem_reliable_is_enabled(void)
 {
@@ -46,6 +48,11 @@ static inline bool skip_none_movable_zone(gfp_t gfp, struct zoneref *z)
 	return false;
 }
+
+static inline bool shmem_reliable_is_enabled(void)
+{
+	return shmem_reliable;
+}
 
 #else
 
 #define reliable_enabled 0
@@ -53,12 +60,14 @@ static inline bool mem_reliable_is_enabled(void) { return false; }
 static inline void add_reliable_mem_size(long sz) {}
 static inline void mem_reliable_init(bool has_unmirrored_mem,
 				     unsigned long *zone_movable_pfn) {}
+static inline void shmem_reliable_init(void) {}
 static inline bool zone_reliable(struct zone *zone) { return false; }
 static inline bool skip_none_movable_zone(gfp_t gfp, struct zoneref *z)
 {
 	return false;
 }
 static inline void reliable_report_meminfo(struct seq_file *m) {}
+static inline bool shmem_reliable_is_enabled(void) { return false; }
 #endif
 
 #endif
diff --git a/mm/mem_reliable.c b/mm/mem_reliable.c
index aa89c874890e6f1568a36355d4876742a0df3aec..e977a4122f8a8e5d3810906e7dbb1000db68dee1 100644
--- a/mm/mem_reliable.c
+++ b/mm/mem_reliable.c
@@ -13,6 +13,7 @@ DEFINE_STATIC_KEY_FALSE(mem_reliable);
 
 bool reliable_enabled;
 static atomic_long_t total_reliable_mem;
+bool shmem_reliable __read_mostly = true;
 
 void add_reliable_mem_size(long sz)
 {
@@ -92,6 +93,17 @@ void mem_reliable_init(bool has_unmirrored_mem, unsigned long *zone_movable_pfn)
 		total_reliable_mem_sz());
 }
 
+void shmem_reliable_init(void)
+{
+	if (!shmem_reliable_is_enabled())
+		return;
+
+	if (!mem_reliable_is_enabled()) {
+		shmem_reliable = false;
+		pr_info("shmem reliable disabled.\n");
+	}
+}
+
 void reliable_report_meminfo(struct seq_file *m)
 {
 	if (!mem_reliable_is_enabled())
diff --git a/mm/shmem.c b/mm/shmem.c
index d36659e545426fd78fc2397187af6b825447915f..746e48454cb8ab1094e83afac95f0861837c0f75 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1570,6 +1570,14 @@ static struct page *shmem_alloc_page(gfp_t gfp,
 	return page;
 }
 
+static inline void shmem_prepare_alloc(gfp_t *gfp_mask)
+{
+	if (!shmem_reliable_is_enabled())
+		return;
+
+	*gfp_mask |= GFP_RELIABLE;
+}
+
 static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
 		struct inode *inode, pgoff_t index, bool huge, int node_id)
 {
@@ -1586,6 +1594,8 @@ static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
 	if (!shmem_inode_acct_block(inode, nr))
 		goto failed;
 
+	shmem_prepare_alloc(&gfp);
+
 	if (huge)
 		page = shmem_alloc_hugepage(gfp, info, index, node_id);
 	else
@@ -3944,6 +3954,8 @@ int __init shmem_init(void)
 	else
 		shmem_huge = 0; /* just in case it was patched */
 #endif
+
+	shmem_reliable_init();
 	return 0;
 
 out1: