diff --git a/mm/compaction.c b/mm/compaction.c
index bbf41ee99142f88468d55a69f8ffbf2952514a46..658c009d60cc4130f722b3bf6798acfe9a8cd891 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1392,11 +1392,19 @@ static enum compact_result __compaction_suitable(struct zone *zone, int order,
 
 	/*
 	 * Watermarks for order-0 must be met for compaction to be able to
-	 * isolate free pages for migration targets.
+	 * isolate free pages for migration targets. This means that the
+	 * watermark and alloc_flags have to match, or be more pessimistic than
+	 * the check in __isolate_free_page(). We don't use the direct
+	 * compactor's alloc_flags, as they are not relevant for freepage
+	 * isolation. We however do use the direct compactor's classzone_idx to
+	 * skip over zones where lowmem reserves would prevent allocation even
+	 * if compaction succeeds.
+	 * ALLOC_CMA is used, as pages in CMA pageblocks are considered
+	 * suitable migration targets
 	 */
 	watermark = low_wmark_pages(zone) + compact_gap(order);
 	if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx,
-				 alloc_flags, wmark_target))
+				 ALLOC_CMA, wmark_target))
 		return COMPACT_SKIPPED;
 
 	/*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a2214c64ed3cd04dceaed7a579f593852e458df1..637b0e907df016620b40fbaa6bd5d502b5048e4e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2491,7 +2491,7 @@ int __isolate_free_page(struct page *page, unsigned int order)
 	if (!is_migrate_isolate(mt)) {
 		/* Obey watermarks as if the page was being allocated */
 		watermark = low_wmark_pages(zone) + (1 << order);
-		if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
+		if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
 			return 0;
 
 		__mod_zone_freepage_state(zone, -(1UL << order), mt);
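
For readers not familiar with the watermark code, the practical effect of passing ALLOC_CMA is that free pages sitting in CMA pageblocks count as usable when the order-0 watermark is checked, which matches the fact that compaction may isolate its migration targets from CMA pageblocks. Below is a minimal, self-contained sketch of that effect; it is not the kernel's code, and struct zone_sketch, watermark_ok_sketch() and ALLOC_CMA_SKETCH are made-up stand-ins for struct zone, __zone_watermark_ok() and ALLOC_CMA.

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical, simplified zone accounting: only what the example needs. */
	struct zone_sketch {
		long nr_free_pages;	/* total free pages in the zone */
		long nr_free_cma_pages;	/* free pages in CMA pageblocks */
		long lowmem_reserve;	/* reserve for the requested classzone */
	};

	#define ALLOC_CMA_SKETCH 0x1	/* stand-in for the real ALLOC_CMA flag */

	/*
	 * Simplified order-0 watermark check: without ALLOC_CMA, free pages in
	 * CMA pageblocks are not counted as usable, so the same watermark is
	 * effectively harder to meet.
	 */
	static bool watermark_ok_sketch(const struct zone_sketch *z,
					unsigned long mark, unsigned int alloc_flags)
	{
		long free_pages = z->nr_free_pages;

		if (!(alloc_flags & ALLOC_CMA_SKETCH))
			free_pages -= z->nr_free_cma_pages;

		return free_pages > (long)mark + z->lowmem_reserve;
	}

	int main(void)
	{
		/* A zone where most of the free memory lives in CMA pageblocks. */
		struct zone_sketch z = {
			.nr_free_pages = 1024,
			.nr_free_cma_pages = 900,
			.lowmem_reserve = 0,
		};
		unsigned long watermark = 256;

		printf("without ALLOC_CMA: %s\n",
		       watermark_ok_sketch(&z, watermark, 0) ? "ok" : "skipped");
		printf("with    ALLOC_CMA: %s\n",
		       watermark_ok_sketch(&z, watermark, ALLOC_CMA_SKETCH) ? "ok" : "skipped");
		return 0;
	}

In this toy zone the check fails without the flag (1024 - 900 = 124 usable pages, below the 256-page mark) and passes with it (all 1024 pages count). Using ALLOC_CMA in both __compaction_suitable() and __isolate_free_page() makes the two checks apply the same criterion, so compaction is neither skipped nor allowed to proceed based on a stricter view of free memory than the one used when the free pages are actually isolated.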