diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 701c82c3613833ea3eccbcf1539a88d515cf4992..c262566f7c5dc5243d6534f028ff7345beaa2bc9 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -1435,7 +1435,7 @@ static int get_hstate_idx(int page_size_log)
 
 	if (!h)
 		return -1;
-	return h - hstates;
+	return hstate_index(h);
 }
 
 /*
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c0d4c7784a84db14abaafcbd761396696090a8af..87104dc78e59057334ad08536a295b5ae52f8329 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1273,7 +1273,7 @@ static void free_gigantic_page(struct page *page, unsigned int order)
 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
 		int nid, nodemask_t *nodemask)
 {
-	unsigned long nr_pages = 1UL << huge_page_order(h);
+	unsigned long nr_pages = pages_per_huge_page(h);
 
 	if (nid == NUMA_NO_NODE)
 		nid = numa_mem_id();
@@ -3267,10 +3267,10 @@ static int __init hugepages_setup(char *s)
 
 	/*
 	 * Global state is always initialized later in hugetlb_init.
-	 * But we need to allocate >= MAX_ORDER hstates here early to still
+	 * But we need to allocate gigantic hstates here early to still
	 * use the bootmem allocator.
	 */
-	if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
+	if (hugetlb_max_hstate && hstate_is_gigantic(parsed_hstate))
		hugetlb_hstate_alloc_pages(parsed_hstate);
 
	last_mhp = mhp;
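
For reference, the helpers this cleanup switches to are pre-existing definitions in include/linux/hugetlb.h, so every hunk is behavior-preserving. A rough sketch of those definitions, paraphrased from kernel headers of this era (not part of the patch itself):

	/* include/linux/hugetlb.h (paraphrased) */
	static inline int hstate_index(struct hstate *h)
	{
		return h - hstates;	/* same pointer arithmetic the first hunk open-coded */
	}

	static inline unsigned long pages_per_huge_page(struct hstate *h)
	{
		return 1UL << huge_page_order(h);	/* same shift as the removed line */
	}

	static inline bool hstate_is_gigantic(struct hstate *h)
	{
		return huge_page_order(h) >= MAX_ORDER;	/* same order test hugepages_setup() did by hand */
	}

Using the named helpers instead of open-coded expressions keeps the hstate-index and "gigantic page" conventions defined in one place, which is the point of the cleanup.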