diff --git a/mm/mem_reliable.c b/mm/mem_reliable.c
index 5505577d37844c8ac8ca24ece8a8d99ffe5300b8..6d4ab4bee3d51db0a0eaf279427388eef5b34f30 100644
--- a/mm/mem_reliable.c
+++ b/mm/mem_reliable.c
@@ -11,7 +11,7 @@
 #include
 #include
 
-#define MEM_RELIABLE_RESERVE_MIN (256UL << 20)
+#define MEM_RELIABLE_RESERVE_MIN 0
 
 enum mem_reliable_types {
 	MEM_RELIABLE_ALL,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index fa1cdeba529369232d5f5a770083e660e0e6701c..9d4f752354204cd916d4a3017f26ad15908a30b7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4271,6 +4271,29 @@ check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
 	return false;
 }
 
+#ifdef CONFIG_MEMORY_RELIABLE
+static inline void mem_reliable_fallback_slowpath(gfp_t gfp_mask,
+						  struct alloc_context *ac)
+{
+	if (!reliable_allow_fb_enabled())
+		return;
+
+	if (gfp_mask & __GFP_NOFAIL)
+		return;
+
+	if ((ac->high_zoneidx == ZONE_NORMAL) &&
+	    (gfp_mask & ___GFP_RELIABILITY)) {
+		ac->high_zoneidx = gfp_zone(gfp_mask & ~___GFP_RELIABILITY);
+		ac->preferred_zoneref = first_zones_zonelist(
+			ac->zonelist, ac->high_zoneidx, ac->nodemask);
+		return;
+	}
+}
+#else
+static inline void mem_reliable_fallback_slowpath(gfp_t gfp_mask,
+						  struct alloc_context *ac) {}
+#endif
+
 static inline struct page *
 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 						struct alloc_context *ac)
@@ -4322,6 +4345,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
 		wake_all_kswapds(order, gfp_mask, ac);
 
+	mem_reliable_fallback_slowpath(gfp_mask, ac);
+
 	/*
 	 * The adjusted alloc_flags might result in immediate success, so try
 	 * that first
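
The hook above widens a reliable allocation (one carrying ___GFP_RELIABILITY, capped at ZONE_NORMAL) so it may fall back to non-reliable zones once the allocator enters the slowpath, unless the caller used __GFP_NOFAIL or the fallback switch is off. Below is a minimal userspace sketch of that decision, assuming stub versions of the kernel helpers reliable_allow_fb_enabled() and gfp_zone() and illustrative flag bit values; it is not the kernel code, only a model of the branch logic in the patch.

/* Sketch only: kernel helpers modeled as stubs, flag bits illustrative. */
#include <stdbool.h>
#include <stdio.h>

#define __GFP_NOFAIL        (1u << 0)   /* illustrative bit, not the kernel's */
#define ___GFP_RELIABILITY  (1u << 1)   /* illustrative bit, not the kernel's */

enum zone_type { ZONE_NORMAL, ZONE_MOVABLE };

/* Stub for the sysfs-controlled fallback switch. */
static bool reliable_allow_fb_enabled(void) { return true; }

/* Stub: without the reliability bit, the request may target ZONE_MOVABLE. */
static enum zone_type gfp_zone(unsigned int gfp_mask)
{
	return (gfp_mask & ___GFP_RELIABILITY) ? ZONE_NORMAL : ZONE_MOVABLE;
}

/*
 * Mirror of mem_reliable_fallback_slowpath(): retarget a reliable
 * request to the zone it would get without ___GFP_RELIABILITY, but
 * only when fallback is allowed and the caller tolerates failure.
 */
static enum zone_type fallback_zoneidx(unsigned int gfp_mask,
				       enum zone_type high_zoneidx)
{
	if (!reliable_allow_fb_enabled())
		return high_zoneidx;
	if (gfp_mask & __GFP_NOFAIL)
		return high_zoneidx;
	if (high_zoneidx == ZONE_NORMAL && (gfp_mask & ___GFP_RELIABILITY))
		return gfp_zone(gfp_mask & ~___GFP_RELIABILITY);
	return high_zoneidx;
}

int main(void)
{
	/* A reliable request falls back from ZONE_NORMAL to ZONE_MOVABLE. */
	printf("fallback zone: %d\n",
	       fallback_zoneidx(___GFP_RELIABILITY, ZONE_NORMAL));
	/* A __GFP_NOFAIL request keeps its original zone. */
	printf("nofail zone: %d\n",
	       fallback_zoneidx(___GFP_RELIABILITY | __GFP_NOFAIL,
				ZONE_NORMAL));
	return 0;
}

Note that the real patch must also recompute ac->preferred_zoneref via first_zones_zonelist() after lowering ac->high_zoneidx, since the cached zoneref was chosen for the old zone cap; the sketch omits that bookkeeping.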