From cc3f11d4e4d654c2be73cb044a011df231b0fd0d Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Wed, 14 Jul 2021 07:06:13 +0000
Subject: [PATCH] mm/page_alloc: combine __alloc_pages and
 __alloc_pages_nodemask

mainline inclusion
from mainline-5.13-rc1
commit 84172f4bb752424415756351a40f8da5714e1554
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I3ZVL2
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=84172f4bb752424415756351a40f8da5714e1554

-------------------------------------------------

There are only two callers of __alloc_pages() so prune the thicket of
alloc_page variants by combining the two functions together.  Current
callers of __alloc_pages() simply add an extra 'NULL' parameter and
current callers of __alloc_pages_nodemask() call __alloc_pages() instead.

Link: https://lkml.kernel.org/r/20210225150642.2582252-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle)
Acked-by: Vlastimil Babka
Acked-by: Michal Hocko
Cc: Mike Rapoport
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
(cherry picked from commit 84172f4bb752424415756351a40f8da5714e1554)
Signed-off-by: Yongqiang Liu
Reviewed-by: Tong Tiangen
Signed-off-by: Zheng Zengkai
---
 Documentation/admin-guide/mm/transhuge.rst |  2 +-
 include/linux/gfp.h                        | 13 +++----------
 mm/hugetlb.c                               |  2 +-
 mm/internal.h                              |  4 ++--
 mm/mempolicy.c                             |  6 +++---
 mm/migrate.c                               |  2 +-
 mm/page_alloc.c                            |  5 ++---
 7 files changed, 13 insertions(+), 21 deletions(-)

diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst
index b2acd0d395ca..2bfb380e8380 100644
--- a/Documentation/admin-guide/mm/transhuge.rst
+++ b/Documentation/admin-guide/mm/transhuge.rst
@@ -417,7 +417,7 @@ compact_blocks_moved
 	a huge page aligned range of pages.
 
 It is possible to establish how long the stalls were using the function
-tracer to record how long was spent in __alloc_pages_nodemask and
+tracer to record how long was spent in __alloc_pages() and
 using the mm_page_alloc tracepoint to identify which allocations were
 for huge pages.
 
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index c603237e006c..bd9aadbaf07d 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -501,15 +501,8 @@ static inline int arch_make_page_accessible(struct page *page)
 }
 #endif
 
-struct page *
-__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
-							nodemask_t *nodemask);
-
-static inline struct page *
-__alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid)
-{
-	return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, NULL);
-}
+struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
+		nodemask_t *nodemask);
 
 /*
  * Allocate pages, preferring the node given as nid. The node must be valid and
@@ -521,7 +514,7 @@ __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
 	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
 	VM_WARN_ON((gfp_mask & __GFP_THISNODE) && !node_online(nid));
 
-	return __alloc_pages(gfp_mask, order, nid);
+	return __alloc_pages(gfp_mask, order, nid, NULL);
 }
 
 /*
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 8e6e2a024916..ac16161d5297 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1719,7 +1719,7 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
 		gfp_mask |= __GFP_RETRY_MAYFAIL;
 	if (nid == NUMA_NO_NODE)
 		nid = numa_mem_id();
-	page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
+	page = __alloc_pages(gfp_mask, order, nid, nmask);
 	if (page)
 		__count_vm_event(HTLB_BUDDY_PGALLOC);
 	else
diff --git a/mm/internal.h b/mm/internal.h
index 3547fed59d51..b289a10d8274 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -126,10 +126,10 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
  * family of functions.
  *
  * nodemask, migratetype and highest_zoneidx are initialized only once in
- * __alloc_pages_nodemask() and then never change.
+ * __alloc_pages() and then never change.
  *
  * zonelist, preferred_zone and highest_zoneidx are set first in
- * __alloc_pages_nodemask() for the fast path, and might be later changed
+ * __alloc_pages() for the fast path, and might be later changed
  * in __alloc_pages_slowpath(). All other functions pass the whole structure
  * by a const pointer.
  */
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 3ca4898f3f24..e12046d1e8a9 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2134,7 +2134,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
 {
 	struct page *page;
 
-	page = __alloc_pages(gfp, order, nid);
+	page = __alloc_pages(gfp, order, nid, NULL);
 	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
 	if (!static_branch_likely(&vm_numa_stat_key))
 		return page;
@@ -2231,7 +2231,7 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
 
 	nmask = policy_nodemask(gfp, pol);
 	preferred_nid = policy_node(gfp, pol, node);
-	page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
+	page = __alloc_pages(gfp, order, preferred_nid, nmask);
 	mpol_cond_put(pol);
 out:
 	return page;
@@ -2268,7 +2268,7 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 	if (pol->mode == MPOL_INTERLEAVE)
 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
 	else
-		page = __alloc_pages_nodemask(gfp, order,
+		page = __alloc_pages(gfp, order,
 				policy_node(gfp, pol, numa_node_id()),
 				policy_nodemask(gfp, pol));
 
diff --git a/mm/migrate.c b/mm/migrate.c
index d76ad37dda87..5c3486864821 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1574,7 +1574,7 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
 	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
 		gfp_mask |= __GFP_HIGHMEM;
 
-	new_page = __alloc_pages_nodemask(gfp_mask, order, nid, mtc->nmask);
+	new_page = __alloc_pages(gfp_mask, order, nid, mtc->nmask);
 
 	if (new_page && PageTransHuge(new_page))
 		prep_transhuge_page(new_page);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 78512e2e70f3..202c19bb03bb 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4919,8 +4919,7 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
 /*
  * This is the 'heart' of the zoned buddy allocator.
  */
-struct page *
-__alloc_pages_nodemask(gfp_t gfp, unsigned int order, int preferred_nid,
+struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
 							nodemask_t *nodemask)
 {
 	struct page *page;
@@ -4982,7 +4981,7 @@ __alloc_pages_nodemask(gfp_t gfp, unsigned int order, int preferred_nid,
 
 	return page;
 }
-EXPORT_SYMBOL(__alloc_pages_nodemask);
+EXPORT_SYMBOL(__alloc_pages);
 
 /*
  * Common helper functions. Never use with __GFP_HIGHMEM because the returned
-- 
GitLab
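Usage note (illustrative, not part of the applied diff): after this patch,
__alloc_pages() is the single entry point and takes the nodemask as its
fourth argument. The sketch below shows how the two old calling conventions
map onto the new signature; the caller example_alloc() and its arguments are
hypothetical, only the __alloc_pages() signature comes from the patch.

	#include <linux/gfp.h>

	static struct page *example_alloc(unsigned int order, int nid,
					  nodemask_t *nmask)
	{
		/* Was: __alloc_pages_nodemask(GFP_KERNEL, order, nid, nmask) */
		struct page *page = __alloc_pages(GFP_KERNEL, order, nid, nmask);

		/*
		 * Was: __alloc_pages(GFP_KERNEL, order, nid) -- the removed
		 * three-argument wrapper; passing a NULL nodemask preserves
		 * its "no explicit nodemask" behaviour.
		 */
		if (!page)
			page = __alloc_pages(GFP_KERNEL, order, nid, NULL);

		return page;
	}

Out-of-tree callers of __alloc_pages_nodemask() need the same mechanical
rename: the old symbol is gone entirely, and only EXPORT_SYMBOL(__alloc_pages)
remains.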