diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 7a9886f98b0c12e8df43fe0e8a897a7d4cbeac9d..a5091c03474753111f77df8de2910152ee38abb8 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -188,15 +188,20 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 	pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);
 
 	for (; start < end; start += page_size) {
-		void *p;
+		void *p = NULL;
 		int rc;
 
 		if (vmemmap_populated(start, page_size))
 			continue;
 
+		/*
+		 * Allocate from the altmap first if we have one. This may
+		 * fail due to alignment issues when using 16MB hugepages, so
+		 * fall back to system memory if the altmap allocation fails.
+		 */
 		if (altmap)
 			p = altmap_alloc_block_buf(page_size, altmap);
-		else
+		if (!p)
 			p = vmemmap_alloc_block_buf(page_size, node);
 		if (!p)
 			return -ENOMEM;
@@ -255,8 +260,15 @@ void __ref vmemmap_free(unsigned long start, unsigned long end,
 {
 	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
 	unsigned long page_order = get_order(page_size);
+	unsigned long alt_start = ~0, alt_end = ~0;
+	unsigned long base_pfn;
 
 	start = _ALIGN_DOWN(start, page_size);
+	if (altmap) {
+		alt_start = altmap->base_pfn;
+		alt_end = altmap->base_pfn + altmap->reserve +
+			  altmap->free + altmap->alloc + altmap->align;
+	}
 
 	pr_debug("vmemmap_free %lx...%lx\n", start, end);
 
@@ -280,8 +292,9 @@ void __ref vmemmap_free(unsigned long start, unsigned long end,
 		page = pfn_to_page(addr >> PAGE_SHIFT);
 		section_base = pfn_to_page(vmemmap_section_start(start));
 		nr_pages = 1 << page_order;
+		base_pfn = PHYS_PFN(addr);
 
-		if (altmap) {
+		if (base_pfn >= alt_start && base_pfn < alt_end) {
 			vmem_altmap_free(altmap, nr_pages);
 		} else if (PageReserved(page)) {
 			/* allocated from bootmem */
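
The two patterns this patch introduces are easy to model outside the kernel. The sketch
below is a minimal userspace approximation, not kernel code: the struct fields mirror
struct vmem_altmap, while pfn_in_altmap() and alloc_with_fallback() are hypothetical
helpers invented here to isolate the logic of the vmemmap_populate() altmap-then-RAM
fallback and the vmemmap_free() range check.

/*
 * Minimal userspace sketch (an assumption-laden illustration, not
 * kernel code). The fields below mirror struct vmem_altmap; the
 * helpers pfn_in_altmap() and alloc_with_fallback() are invented
 * for illustration only.
 */
#include <stdio.h>
#include <stdlib.h>

struct vmem_altmap {
	unsigned long base_pfn;		/* first pfn covered by the altmap */
	unsigned long reserve;		/* pfns reserved at the base */
	unsigned long free;		/* pfns still available */
	unsigned long alloc;		/* pfns already allocated */
	unsigned long align;		/* pfns lost to alignment padding */
};

/*
 * Mirrors the vmemmap_free() hunk: a backing page belongs to the altmap
 * only if its pfn falls inside [base_pfn, base_pfn + reserve + free +
 * alloc + align). With no altmap, alt_start/alt_end stay at ~0 and the
 * test is always false.
 */
static int pfn_in_altmap(unsigned long pfn, const struct vmem_altmap *altmap)
{
	unsigned long alt_start = ~0UL, alt_end = ~0UL;

	if (altmap) {
		alt_start = altmap->base_pfn;
		alt_end = altmap->base_pfn + altmap->reserve +
			  altmap->free + altmap->alloc + altmap->align;
	}

	return pfn >= alt_start && pfn < alt_end;
}

/*
 * Mirrors the vmemmap_populate() hunk: try the altmap allocator first,
 * then fall back to the regular allocator when the altmap cannot satisfy
 * the request (e.g. a 16MB page it cannot align). malloc() stands in for
 * both altmap_alloc_block_buf() and vmemmap_alloc_block_buf().
 */
static void *alloc_with_fallback(const struct vmem_altmap *altmap, int altmap_ok)
{
	void *p = NULL;

	if (altmap && altmap_ok)
		p = malloc(4096);	/* altmap path */
	if (!p)
		p = malloc(4096);	/* system memory fallback */

	return p;
}

int main(void)
{
	struct vmem_altmap altmap = {
		.base_pfn = 0x1000, .reserve = 2, .free = 8, .alloc = 4, .align = 2,
	};
	void *p;

	/* pfns 0x1000..0x100f are altmap-backed; anything outside is not. */
	printf("pfn 0x1008 from altmap? %d\n", pfn_in_altmap(0x1008, &altmap));
	printf("pfn 0x2000 from altmap? %d\n", pfn_in_altmap(0x2000, &altmap));

	p = alloc_with_fallback(&altmap, 0);	/* forces the RAM fallback */
	free(p);
	return 0;
}

The range check is what makes the fallback safe to add: once vmemmap_populate() can
satisfy an allocation from system memory even when an altmap is present, vmemmap_free()
can no longer assume that having an altmap means every backing page came from it, so
each page's pfn must be tested against the altmap's extent before calling
vmem_altmap_free().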