diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d7a68eb0db429476724e9de15f7b52fd0d1f2735..3f94097425d6ed3290b3ea1db390396c3d46e444 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2639,14 +2639,17 @@ static void __vunmap(const void *addr, int deallocate_pages)
 	vm_remove_mappings(area, deallocate_pages);
 
 	if (deallocate_pages) {
-		unsigned int page_order = vm_area_page_order(area);
 		int i;
 
-		for (i = 0; i < area->nr_pages; i += 1U << page_order) {
+		for (i = 0; i < area->nr_pages; i++) {
 			struct page *page = area->pages[i];
 
 			BUG_ON(!page);
-			__free_pages(page, page_order);
+			/*
+			 * High-order allocs for huge vmallocs are split, so
+			 * can be freed as an array of order-0 allocations
+			 */
+			__free_pages(page, 0);
 		}
 		atomic_long_sub(area->nr_pages, &nr_vmalloc_pages);
 
@@ -2926,8 +2929,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 		struct page *page;
 		int p;
 
-		/* Compound pages required for remap_vmalloc_page */
-		page = alloc_pages_node(node, gfp_mask | __GFP_COMP, page_order);
+		page = alloc_pages_node(node, gfp_mask, page_order);
 		if (unlikely(!page)) {
 			/* Successfully allocated i pages, free them in __vfree() */
 			area->nr_pages = i;
@@ -2939,6 +2941,16 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 			goto fail;
 		}
 
+		/*
+		 * Higher order allocations must be able to be treated as
+		 * independent small pages by callers (as they can with
+		 * small-page vmallocs). Some drivers do their own refcounting
+		 * on vmalloc_to_page() pages, some use page->mapping,
+		 * page->lru, etc.
+		 */
+		if (page_order)
+			split_page(page, page_order);
+
 		for (p = 0; p < (1U << page_order); p++)
 			area->pages[i + p] = page + p;
 