Commit e97ca8e5 authored by Johannes Weiner, committed by Linus Torvalds

mm: fix GFP_THISNODE callers and clarify

GFP_THISNODE is for callers that implement their own clever fallback to
remote nodes.  It restricts the allocation to the specified node and
does not invoke reclaim, assuming that the caller will take care of it
when the fallback fails, e.g.  through a subsequent allocation request
without GFP_THISNODE set.
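
To make that intended usage concrete, here is a minimal sketch of the kind of
caller GFP_THISNODE was designed for; the wrapper function and its fallback
policy are illustrative only and are not part of this patch:

	/* Illustrative only: the caller-implemented fallback GFP_THISNODE assumes. */
	static struct page *alloc_on_node_with_fallback(int nid, unsigned int order)
	{
		struct page *page;

		/* Node-exclusive first attempt: the slowpath bails out instead of reclaiming. */
		page = alloc_pages_node(nid, GFP_KERNEL | GFP_THISNODE, order);
		if (page)
			return page;

		/* Caller-implemented fallback: remote nodes allowed, normal reclaim applies. */
		return alloc_pages_node(nid, GFP_KERNEL, order);
	}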

However, many current GFP_THISNODE users only want the node exclusive
aspect of the flag, without actually implementing their own fallback or
triggering reclaim if necessary.  This results in things like page
migration failing prematurely even when there is easily reclaimable
memory available, unless kswapd happens to be running already or a
concurrent allocation attempt triggers the necessary reclaim.

Convert all callsites that don't implement their own fallback strategy
to __GFP_THISNODE.  This restricts the allocation to a single node too, but
at the same time allows the allocator to enter the slowpath, wake
kswapd, and invoke direct reclaim if necessary, to make the allocation
happen when memory is full.
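
For a typical converted callsite, the change amounts to the following flag
difference (illustrative snippet; "nid" and the zero order are placeholders,
and the GFP_THISNODE expansion is the NUMA definition from the gfp.h hunk
below):

	/* Before: node-exclusive, but the composite flag suppresses reclaim.
	 * On NUMA, GFP_THISNODE is __GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY,
	 * and the allocator gives up in the slowpath when it sees that mask. */
	page = alloc_pages_exact_node(nid, GFP_KERNEL | GFP_THISNODE, 0);

	/* After: still restricted to nid, but GFP_KERNEL's reclaim behaviour is
	 * retained, so kswapd can be woken and direct reclaim can run. */
	page = alloc_pages_exact_node(nid, GFP_KERNEL | __GFP_THISNODE, 0);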
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Jan Stancek <jstancek@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: fa389e22
@@ -98,7 +98,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)

 	/* attempt to allocate a granule's worth of cached memory pages */
 	page = alloc_pages_exact_node(nid,
-				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
 				IA64_GRANULE_SHIFT-PAGE_SHIFT);
 	if (!page) {
 		mutex_unlock(&uc_pool->add_chunk_mutex);
...
@@ -123,7 +123,8 @@ static int __init cbe_ptcal_enable_on_node(int nid, int order)

 	area->nid = nid;
 	area->order = order;
-	area->pages = alloc_pages_exact_node(area->nid, GFP_KERNEL|GFP_THISNODE,
+	area->pages = alloc_pages_exact_node(area->nid,
+					GFP_KERNEL|__GFP_THISNODE,
 					area->order);
 	if (!area->pages) {
...
@@ -240,7 +240,7 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,

 	nid = cpu_to_node(cpu);
 	page = alloc_pages_exact_node(nid,
-				      GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+				      GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
 				      pg_order);
 	if (page == NULL) {
 		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
...
@@ -123,6 +123,10 @@ struct vm_area_struct;

 			 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
 			 __GFP_NO_KSWAPD)
+/*
+ * GFP_THISNODE does not perform any reclaim, you most likely want to
+ * use __GFP_THISNODE to allocate from a given node without fallback!
+ */
 #ifdef CONFIG_NUMA
 #define GFP_THISNODE	(__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
 #else
...
@@ -590,10 +590,10 @@ static inline bool zone_is_empty(struct zone *zone)

 /*
  * The NUMA zonelists are doubled because we need zonelists that restrict the
- * allocations to a single node for GFP_THISNODE.
+ * allocations to a single node for __GFP_THISNODE.
  *
  * [0]	: Zonelist with fallback
- * [1]	: No fallback (GFP_THISNODE)
+ * [1]	: No fallback (__GFP_THISNODE)
  */
 #define MAX_ZONELISTS 2
...
@@ -410,7 +410,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)

  *
  * %GFP_NOWAIT - Allocation will not sleep.
  *
- * %GFP_THISNODE - Allocate node-local memory only.
+ * %__GFP_THISNODE - Allocate node-local memory only.
  *
  * %GFP_DMA - Allocation suitable for DMA.
  *   Should only be used for kmalloc() caches. Otherwise, use a
...
@@ -549,14 +549,14 @@ static int create_hash_tables(void)

 		struct page *page;

 		page = alloc_pages_exact_node(node,
-				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
 				0);
 		if (!page)
 			goto out_cleanup;
 		per_cpu(cpu_profile_hits, cpu)[1]
 				= (struct profile_hit *)page_address(page);
 		page = alloc_pages_exact_node(node,
-				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
 				0);
 		if (!page)
 			goto out_cleanup;
...
@@ -1158,7 +1158,7 @@ static struct page *new_page_node(struct page *p, unsigned long private,

 					pm->node);
 	else
 		return alloc_pages_exact_node(pm->node,
-				GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
+				GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0);
 }

 /*
@@ -1544,9 +1544,9 @@ static struct page *alloc_misplaced_dst_page(struct page *page,

 	struct page *newpage;

 	newpage = alloc_pages_exact_node(nid,
-					 (GFP_HIGHUSER_MOVABLE | GFP_THISNODE |
-					  __GFP_NOMEMALLOC | __GFP_NORETRY |
-					  __GFP_NOWARN) &
+					 (GFP_HIGHUSER_MOVABLE |
+					  __GFP_THISNODE | __GFP_NOMEMALLOC |
+					  __GFP_NORETRY | __GFP_NOWARN) &
 					 ~GFP_IOFS, 0);

 	return newpage;
@@ -1747,7 +1747,8 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,

 		goto out_dropref;

 	new_page = alloc_pages_node(node,
-		(GFP_TRANSHUGE | GFP_THISNODE) & ~__GFP_WAIT, HPAGE_PMD_ORDER);
+		(GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_WAIT,
+		HPAGE_PMD_ORDER);
 	if (!new_page)
 		goto out_fail;
...