diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 19394f8f9daf1ad9d499f4cf2987182615b76d60..12a7a81996d3b6fb145bc78b20fd2448c8532bbc 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4971,6 +4971,18 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
 	if (nr_pages - nr_populated == 1)
 		goto failed;
 
+#ifdef CONFIG_PAGE_OWNER
+	/*
+	 * PAGE_OWNER may recurse into the allocator to allocate space to
+	 * save the stack with pagesets.lock held. Releasing/reacquiring
+	 * removes much of the performance benefit of bulk allocation so
+	 * force the caller to allocate one page at a time as it'll have
+	 * similar performance to added complexity to the bulk allocator.
+	 */
+	if (static_branch_unlikely(&page_owner_inited))
+		goto failed;
+#endif
+
 	/* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */
 	gfp &= gfp_allowed_mask;
 	alloc_gfp = gfp;
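
The new comment relies on bulk-allocation callers already handling a partial result: when page_owner is enabled the function bails out early and the caller falls back to allocating one page at a time. A minimal caller-side sketch of that pattern follows; it assumes the alloc_pages_bulk_array() wrapper and a zero-initialised page array, and fill_page_array() is a hypothetical name used only for illustration, not part of this patch.

#include <linux/errno.h>
#include <linux/gfp.h>

static int fill_page_array(gfp_t gfp, unsigned long nr_pages,
			   struct page **page_array)
{
	unsigned long nr_populated;

	/*
	 * The bulk allocator may return fewer pages than requested,
	 * e.g. when page_owner is enabled and it bails out early.
	 */
	nr_populated = alloc_pages_bulk_array(gfp, nr_pages, page_array);

	/* Fall back to one-page-at-a-time allocation for the remainder. */
	while (nr_populated < nr_pages) {
		page_array[nr_populated] = alloc_page(gfp);
		if (!page_array[nr_populated])
			return -ENOMEM;
		nr_populated++;
	}

	return 0;
}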