diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index a8f0d804a758c093552b809df16005fbdfc3e4f2..fcc50f6fdb46b0579791cb19d3e43698f5d07c82 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1394,6 +1394,8 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
 		goto failed_removal;
 	}
 
+	drain_all_pages(zone);
+
 	arg.start_pfn = start_pfn;
 	arg.nr_pages = nr_pages;
 	node_states_check_changes_offline(nr_pages, zone, &arg);
@@ -1444,11 +1446,10 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
 		}
 
 		/*
-		 * per-cpu pages are drained in start_isolate_page_range, but if
-		 * there are still pages that are not free, make sure that we
-		 * drain again, because when we isolated range we might
-		 * have raced with another thread that was adding pages to pcp
-		 * list.
+		 * per-cpu pages are drained after start_isolate_page_range, but
+		 * if there are still pages that are not free, make sure that we
+		 * drain again, because when we isolated range we might have
+		 * raced with another thread that was adding pages to pcp list.
 		 *
 		 * Forward progress should be still guaranteed because
 		 * pages on the pcp list can only belong to MOVABLE_ZONE
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 20cdcd2b3280e3557acc3d136c7093b958ce1d83..da0663435f0aaeadb6d9b38626cdae2274bc0d52 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -8739,6 +8739,8 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 	if (ret)
 		return ret;
 
+	drain_all_pages(cc.zone);
+
 	/*
 	 * In case of -EBUSY, we'd like to know which page causes problem.
 	 * So, just fall through. test_pages_isolated() has a tracepoint
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 756d1542f2c8f2e64230b1e91bb18939c8c907b9..46005c960ffa5abf9c0240214fc60c3ca0ca833e 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -49,7 +49,6 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
 
 	__mod_zone_freepage_state(zone, -nr_pages, mt);
 	spin_unlock_irqrestore(&zone->lock, flags);
-	drain_all_pages(zone);
 	return 0;
 }
 
@@ -172,11 +171,12 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
  *
  * Please note that there is no strong synchronization with the page allocator
  * either. Pages might be freed while their page blocks are marked ISOLATED.
- * In some cases pages might still end up on pcp lists and that would allow
+ * A call to drain_all_pages() after isolation can flush most of them. However
+ * in some cases pages might still end up on pcp lists and that would allow
  * for their allocation even when they are in fact isolated already. Depending
- * on how strong of a guarantee the caller needs drain_all_pages might be needed
- * (e.g. __offline_pages will need to call it after check for isolated range for
- * a next retry).
+ * on how strong of a guarantee the caller needs, further drain_all_pages()
+ * might be needed (e.g. __offline_pages will need to call it after check for
+ * isolated range for a next retry).
  *
  * Return: 0 on success and -EBUSY if any part of range cannot be isolated.
  */
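
For context, the net effect of the patch is that draining the per-cpu (pcp)
lists is no longer an implicit, per-pageblock side effect of
set_migratetype_isolate(), but an explicit, once-per-range step done by the
isolation users themselves. The sketch below illustrates that caller-side
convention. It is not part of the patch: the helper isolate_range_and_drain()
is hypothetical, and it assumes the ~v5.10 signatures of
start_isolate_page_range(), drain_all_pages() and test_pages_isolated().

#include <linux/gfp.h>			/* drain_all_pages() */
#include <linux/mmzone.h>		/* struct zone, MIGRATE_MOVABLE */
#include <linux/page-isolation.h>	/* start_isolate_page_range(), MEMORY_OFFLINE */

/*
 * Hypothetical helper showing the pattern this patch moves callers to:
 * isolate the whole pageblock-aligned range first, then drain the pcp
 * lists once, instead of one drain_all_pages() per pageblock.
 */
static int isolate_range_and_drain(unsigned long start_pfn,
				   unsigned long end_pfn,
				   struct zone *zone)
{
	int ret;

	/*
	 * Mark all pageblocks in the range MIGRATE_ISOLATE. With this
	 * patch applied, there is no implicit drain in here anymore.
	 */
	ret = start_isolate_page_range(start_pfn, end_pfn,
				       MIGRATE_MOVABLE, MEMORY_OFFLINE);
	if (ret)
		return ret;

	/* One explicit drain now covers the whole range. */
	drain_all_pages(zone);

	/*
	 * Frees racing with the isolation may still have landed on pcp
	 * lists. If so, drain once more and report -EBUSY so the caller
	 * can retry, mirroring what offline_pages() does in its loop.
	 */
	ret = test_pages_isolated(start_pfn, end_pfn, MEMORY_OFFLINE);
	if (ret)
		drain_all_pages(zone);

	return ret;
}

Draining once per range rather than once per pageblock is cheaper, and it
lets each caller decide how strong an isolation guarantee it needs, as the
updated comment in page_isolation.c describes.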