diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 852e1997e12c6e129cdd0b641851114c62f65ce1..929dd2a1238095f9c7804b117d4e1ebfea25d330 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -546,6 +546,12 @@ enum pgdat_flags {
 	PGDAT_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
 };
 
+enum zone_flags {
+	ZONE_BOOSTED_WATERMARK,		/* zone recently boosted watermarks.
+					 * Cleared when kswapd is woken.
+					 */
+};
+
 static inline unsigned long zone_end_pfn(const struct zone *zone)
 {
 	return zone->zone_start_pfn + zone->spanned_pages;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2cfd72c771a42fc08832149804d2b4ba5f99b5d1..739aa486dbbf7a2bff5286342e4a983cb0c0f931 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2385,7 +2385,7 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page,
 	 */
 	boost_watermark(zone);
 	if (alloc_flags & ALLOC_KSWAPD)
-		wakeup_kswapd(zone, 0, 0, zone_idx(zone));
+		set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
 
 	/* We are not allowed to try stealing from the whole block */
 	if (!whole_block)
@@ -3281,6 +3281,12 @@ struct page *rmqueue(struct zone *preferred_zone,
 	local_irq_restore(flags);
 
 out:
+	/* Separate test+clear to avoid unnecessary atomics */
+	if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) {
+		clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
+		wakeup_kswapd(zone, 0, 0, zone_idx(zone));
+	}
+
 	VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
 	return page;
 
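The rmqueue() hunk does a plain test_bit() before clear_bit() so the common path, where no watermark boost is pending, costs only a read rather than an atomic read-modify-write. Below is a minimal userspace C sketch of that test-then-clear idiom, using C11 atomics in place of the kernel's bitops; the names (zone_flags_word, BOOSTED_BIT, maybe_wake_reclaimer) are illustrative stand-ins, not kernel API.

/*
 * Sketch of "test, then clear, then act" (not kernel code).
 * The relaxed load keeps the common path cheap; the atomic
 * fetch_and is only paid when the flag was actually set.
 */
#include <stdatomic.h>
#include <stdio.h>

#define BOOSTED_BIT	(1UL << 0)

static _Atomic unsigned long zone_flags_word;

static void maybe_wake_reclaimer(void)
{
	/* Cheap read first: most calls find the bit clear. */
	if (atomic_load_explicit(&zone_flags_word,
				 memory_order_relaxed) & BOOSTED_BIT) {
		/* Only now do the atomic read-modify-write. */
		atomic_fetch_and(&zone_flags_word, ~BOOSTED_BIT);
		puts("waking reclaimer");	/* stands in for wakeup_kswapd() */
	}
}

int main(void)
{
	maybe_wake_reclaimer();				/* bit clear: no wake */
	atomic_fetch_or(&zone_flags_word, BOOSTED_BIT);	/* like set_bit() */
	maybe_wake_reclaimer();				/* bit set: clears and wakes */
	return 0;
}

The sketch tolerates the same benign race as the patch: two callers can both observe the bit set and both issue a wakeup, which is harmless since an extra wakeup of an already-running reclaimer is a no-op.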