diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a3b7eb86f9129e0b87d3ede5da2bddddb4f30ac2..8380011d77dbcd84f10f69b96ff98f4fa2841ecf 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3398,31 +3398,26 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 				ac.nodemask, &ac.preferred_zone);
 	if (!ac.preferred_zone) {
 		page = NULL;
-		goto out;
+		goto no_zone;
 	}
 
 	ac.classzone_idx = zonelist_zone_idx(preferred_zoneref);
 
 	/* First allocation attempt */
 	page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
-	if (unlikely(!page)) {
-		/*
-		 * Runtime PM, block IO and its error handling path
-		 * can deadlock because I/O on the device might not
-		 * complete.
-		 */
-		alloc_mask = memalloc_noio_flags(gfp_mask);
-		ac.spread_dirty_pages = false;
-
-		page = __alloc_pages_slowpath(alloc_mask, order, &ac);
-	}
+	if (likely(page))
+		goto out;
 
-	if (kmemcheck_enabled && page)
-		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
+	/*
+	 * Runtime PM, block IO and its error handling path can deadlock
+	 * because I/O on the device might not complete.
+	 */
+	alloc_mask = memalloc_noio_flags(gfp_mask);
+	ac.spread_dirty_pages = false;
 
-	trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
+	page = __alloc_pages_slowpath(alloc_mask, order, &ac);
 
-out:
+no_zone:
 	/*
 	 * When updating a task's mems_allowed, it is possible to race with
 	 * parallel threads in such a way that an allocation can fail while
@@ -3434,6 +3429,12 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 		goto retry_cpuset;
 	}
 
+out:
+	if (kmemcheck_enabled && page)
+		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
+
+	trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
+
 	return page;
 }
 EXPORT_SYMBOL(__alloc_pages_nodemask);
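
The net effect of the reshuffle is easier to see outside diff form: a
successful fast-path allocation now branches directly to the common exit, a
failed one falls through into the slow path and then the cpuset-retry check,
and the kmemcheck and tracepoint hooks run exactly once at "out:" for every
outcome. Below is a minimal standalone C model of that label layout;
fast_alloc(), slow_alloc() and cpuset_retry() are hypothetical stubs standing
in for get_page_from_freelist(), __alloc_pages_slowpath() and the
mems_allowed retry logic, so this is an illustration of the control flow, not
kernel code.

/*
 * Standalone model of the control flow after this patch, for illustration
 * only: fast_alloc(), slow_alloc() and cpuset_retry() are hypothetical
 * stand-ins for the kernel helpers, not real APIs.
 */
#include <stdbool.h>
#include <stdio.h>

static int fast_alloc(void)    { return 0; }  /* 0: fast path fails */
static int slow_alloc(void)    { return 42; } /* nonzero: slow path succeeds */
static bool cpuset_retry(void) { return false; }

static int alloc_model(bool zone_ok)
{
	int page;

	if (!zone_ok) {
		page = 0;
		goto no_zone;		/* no usable zone: skip both attempts */
	}

	page = fast_alloc();
	if (page)
		goto out;		/* fast path: straight to the hooks */

	page = slow_alloc();		/* slow path falls through below */

no_zone:
	if (!page && cpuset_retry()) {
		/* in the kernel: goto retry_cpuset and start over */
	}

out:
	/* the kmemcheck and tracepoint hooks now live here, on both paths */
	printf("alloc traced: page=%d\n", page);
	return page;
}

int main(void)
{
	return alloc_model(true) ? 0 : 1;
}

One consequence of this layout is that the fast path pays for no hook
conditionals of its own before reaching "out:". It also bypasses the
mems_allowed retry test entirely, which appears safe given the comment in the
patch: that check only matters when the allocation is about to fail, and a
fast-path success by definition has a page in hand.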