Commit 8b913238 authored by Michal Hocko, committed by Linus Torvalds

mm: unify new_node_page and alloc_migrate_target

Commit 394e31d2 ("mem-hotplug: alloc new page from a nearest
neighbor node when mem-offline") duplicated a large part of
alloc_migrate_target with some hotplug-specific special casing.

To be more precise, it tried to enforce allocation from a node other
than the one the original page sits on.  As a result the two functions
diverged in their shared logic, e.g. the hugetlb allocation strategy.

Let's unify the two and express the different NUMA requirements through
the given nodemask: new_node_page simply excludes the node it does not
want to allocate from, while alloc_migrate_target uses all available
nodes.  alloc_migrate_target thereby also learns to migrate hugetlb
pages more sanely and to use the preallocated hugetlb pool when possible.
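
For reference, the node selection in new_node_page ends up looking roughly
like the sketch below (the middle of the function is elided from the hunks
further down; the comment wording here is paraphrased, not a verbatim quote):

	int nid = page_to_nid(page);
	nodemask_t nmask = node_states[N_MEMORY];

	/*
	 * Prefer any node other than the one the page currently sits on,
	 * but fall back to that node if it is the only one with memory
	 * (e.g. when only part of the sole existing node is being offlined).
	 */
	node_clear(nid, nmask);
	if (nodes_empty(nmask))
		node_set(nid, nmask);

	return new_page_nodemask(page, nid, &nmask);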

Please note that alloc_migrate_target used to call alloc_page (resp.
alloc_pages_current), so it followed the memory policy of the current
context.  That is quite strange when we consider that it is used in the
context of alloc_contig_range, which just tries to migrate pages that
stand in the way.
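
For those wondering where the caller's policy sneaks in: with NUMA enabled,
alloc_page() resolves to alloc_pages_current(), which starts out roughly like
this (paraphrased from mm/mempolicy.c of this era, not a verbatim quote):

	struct page *alloc_pages_current(gfp_t gfp, unsigned order)
	{
		struct mempolicy *pol = &default_policy;

		/*
		 * Use the calling task's policy unless we are in interrupt
		 * context or __GFP_THISNODE is set.
		 */
		if (!in_interrupt() && !(gfp & __GFP_THISNODE))
			pol = get_task_policy(current);

		/* ... allocate according to pol, i.e. the caller's policy,
		 * with no regard for where the page being migrated lives ... */
	}

so the migration destination depended on whichever task happened to call
alloc_contig_range rather than on the page itself.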

Link: http://lkml.kernel.org/r/20170608074553.22152-4-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Xishi Qiu <qiuxishi@huawei.com>
Cc: zhong jiang <zhongjiang@huawei.com>
Cc: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 4db9b2ef
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -4,6 +4,7 @@
 #include <linux/mm.h>
 #include <linux/mempolicy.h>
 #include <linux/migrate_mode.h>
+#include <linux/hugetlb.h>
 
 typedef struct page *new_page_t(struct page *page, unsigned long private,
 				int **reason);
@@ -30,6 +31,21 @@ enum migrate_reason {
 /* In mm/debug.c; also keep sync with include/trace/events/migrate.h */
 extern char *migrate_reason_names[MR_TYPES];
 
+static inline struct page *new_page_nodemask(struct page *page,
+				int preferred_nid, nodemask_t *nodemask)
+{
+	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
+
+	if (PageHuge(page))
+		return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
+				nodemask);
+
+	if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
+		gfp_mask |= __GFP_HIGHMEM;
+
+	return __alloc_pages_nodemask(gfp_mask, 0, preferred_nid, nodemask);
+}
+
 #ifdef CONFIG_MIGRATION
 
 extern void putback_movable_pages(struct list_head *l);
......
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1433,7 +1433,6 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
 static struct page *new_node_page(struct page *page, unsigned long private,
 		int **result)
 {
-	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
 	int nid = page_to_nid(page);
 	nodemask_t nmask = node_states[N_MEMORY];
 
@@ -1446,15 +1445,7 @@ static struct page *new_node_page(struct page *page, unsigned long private,
 	if (nodes_empty(nmask))
 		node_set(nid, nmask);
 
-	if (PageHuge(page))
-		return alloc_huge_page_nodemask(
-				page_hstate(compound_head(page)), &nmask);
-
-	if (PageHighMem(page)
-	    || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
-		gfp_mask |= __GFP_HIGHMEM;
-
-	return __alloc_pages_nodemask(gfp_mask, 0, nid, &nmask);
+	return new_page_nodemask(page, nid, &nmask);
 }
 
 #define NR_OFFLINE_AT_ONCE_PAGES	(256)
......
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -8,6 +8,7 @@
 #include <linux/memory.h>
 #include <linux/hugetlb.h>
 #include <linux/page_owner.h>
+#include <linux/migrate.h>
 #include "internal.h"
 
 #define CREATE_TRACE_POINTS
@@ -294,20 +295,5 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
 struct page *alloc_migrate_target(struct page *page, unsigned long private,
 				  int **resultp)
 {
-	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
-
-	/*
-	 * TODO: allocate a destination hugepage from a nearest neighbor node,
-	 * accordance with memory policy of the user process if possible. For
-	 * now as a simple work-around, we use the next node for destination.
-	 */
-	if (PageHuge(page))
-		return alloc_huge_page_node(page_hstate(compound_head(page)),
-					    next_node_in(page_to_nid(page),
-							 node_online_map));
-
-	if (PageHighMem(page))
-		gfp_mask |= __GFP_HIGHMEM;
-
-	return alloc_page(gfp_mask);
+	return new_page_nodemask(page, numa_node_id(), &node_states[N_MEMORY]);
 }