diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2b5a60a856680afcc4a92fdbb5fac510b4194d7f..5543c1fb490124cabf70d5cbf8744aa136161350 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -809,7 +809,7 @@ static inline void __free_one_page(struct page *page,
 	struct page *buddy;
 	unsigned int max_order;
 
-	max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
+	max_order = min_t(unsigned int, MAX_ORDER - 1, pageblock_order);
 
 	VM_BUG_ON(!zone_is_initialized(zone));
 	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
@@ -822,7 +822,7 @@ static inline void __free_one_page(struct page *page,
 	VM_BUG_ON_PAGE(bad_range(zone, page), page);
 
 continue_merging:
-	while (order < max_order - 1) {
+	while (order < max_order) {
 		buddy_pfn = __find_buddy_pfn(pfn, order);
 		buddy = page + (buddy_pfn - pfn);
 
@@ -846,7 +846,7 @@ static inline void __free_one_page(struct page *page,
 		pfn = combined_pfn;
 		order++;
 	}
-	if (max_order < MAX_ORDER) {
+	if (order < MAX_ORDER - 1) {
 		/* If we are here, it means order is >= pageblock_order.
 		 * We want to prevent merge between freepages on isolate
 		 * pageblock and normal pageblock. Without this, pageblock
@@ -867,7 +867,7 @@ static inline void __free_one_page(struct page *page,
 					is_migrate_isolate(buddy_mt)))
 				goto done_merging;
 		}
-		max_order++;
+		max_order = order + 1;
 		goto continue_merging;
 	}
 
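
Editor's note (not part of the patch): reading the four hunks together, max_order now starts one order lower, the merging loop drops its "- 1", the post-loop check reads order directly instead of max_order, and the bump becomes "max_order = order + 1", so max_order is rewritten to track the order the loop is allowed to reach rather than acting as an exclusive bound one past it. The stand-alone C program below is a minimal sketch of the new control flow only; MAX_ORDER = 11 and pageblock_order = 9 are assumed typical values, and buddy_is_free() / merge_blocked_by_isolation() are invented stand-ins for page_is_buddy() and the has_isolate_pageblock() path, not kernel code.

#include <stdbool.h>
#include <stdio.h>

#define MAX_ORDER	11	/* assumed: common kernel default */
#define pageblock_order	9	/* assumed: HUGETLB_PAGE_ORDER on x86-64 */

static unsigned int min_uint(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Stand-in for page_is_buddy(): pretend every buddy is free and mergeable. */
static bool buddy_is_free(unsigned int order)
{
	(void)order;
	return true;
}

/* Stand-in for the has_isolate_pageblock() path: pretend nothing is isolated. */
static bool merge_blocked_by_isolation(unsigned int order)
{
	(void)order;
	return false;
}

int main(void)
{
	unsigned int order = 0;
	/* New semantics: highest order reachable before the isolation check. */
	unsigned int max_order = min_uint(MAX_ORDER - 1, pageblock_order);

continue_merging:
	while (order < max_order) {
		if (!buddy_is_free(order))
			goto done_merging;
		order++;			/* one merge step */
	}
	if (order < MAX_ORDER - 1) {
		if (merge_blocked_by_isolation(order))
			goto done_merging;
		max_order = order + 1;		/* allow exactly one more step */
		goto continue_merging;
	}
done_merging:
	printf("final order: %u\n", order);	/* prints 10 under these assumptions */
	return 0;
}

With every buddy treated as free and no isolation, the simulation merges up to pageblock_order, takes the post-loop branch once per extra order, and stops at MAX_ORDER - 1, which mirrors how the patched loop is driven by order rather than by a separately maintained exclusive bound.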