commit 9b819d20
Author:    Christoph Lameter <clameter@sgi.com>
Committer: Linus Torvalds <torvalds@osdl.org>

[PATCH] Add __GFP_THISNODE to avoid fallback to other nodes and ignore cpuset/memory policy restrictions

Add a new gfp flag __GFP_THISNODE to avoid fallback to other nodes.  This
flag is essential if a kernel component requires memory to be located on a
certain node.  It will be needed for alloc_pages_node() to force allocation
on the indicated node and for alloc_pages() to force allocation on the
current node.
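As a minimal usage sketch (illustrative only, not part of this patch; the node
id 'nid' and the error handling are hypothetical), a caller that must have
node-local memory would combine the flag with its usual gfp mask:

    #include <linux/gfp.h>
    #include <linux/mm.h>

    /* Request one page that must come from node 'nid'.  With
     * __GFP_THISNODE set, the allocator fails rather than falling
     * back to another node's zones.
     */
    struct page *page = alloc_pages_node(nid, GFP_KERNEL | __GFP_THISNODE, 0);
    if (!page)
            return -ENOMEM; /* no memory on that node; no fallback was tried */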
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 056c6241
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -45,6 +45,7 @@ struct vm_area_struct;
 #define __GFP_ZERO	((__force gfp_t)0x8000u)/* Return zeroed page on success */
 #define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
 #define __GFP_HARDWALL   ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */
+#define __GFP_THISNODE	((__force gfp_t)0x40000u)/* No fallback, no policies */
 
 #define __GFP_BITS_SHIFT 20	/* Room for 20 __GFP_FOO bits */
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
@@ -53,7 +54,7 @@ struct vm_area_struct;
 #define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
 			__GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \
 			__GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP| \
-			__GFP_NOMEMALLOC|__GFP_HARDWALL)
+			__GFP_NOMEMALLOC|__GFP_HARDWALL|__GFP_THISNODE)
 
 /* This equals 0, but use constants in case they ever change */
 #define GFP_NOWAIT	(GFP_ATOMIC & ~__GFP_HIGH)
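A quick arithmetic check on the new bit (not part of the patch): 0x40000u is
1 << 18, so the flag still fits under __GFP_BITS_SHIFT (20) and is covered by
__GFP_BITS_MASK (0xfffff). A hypothetical compile-time assertion of that
invariant, assuming BUILD_BUG_ON from <linux/kernel.h>:

    /* 0x40000 == 1 << 18 < 1 << 20, so the flag lies inside
     * __GFP_BITS_MASK == (1 << 20) - 1 == 0xfffff.
     */
    BUILD_BUG_ON((__GFP_THISNODE & ~__GFP_BITS_MASK) != 0);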
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2316,7 +2316,7 @@ int __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
 	const struct cpuset *cs;	/* current cpuset ancestors */
 	int allowed;			/* is allocation in zone z allowed? */
 
-	if (in_interrupt())
+	if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
 		return 1;
 	node = z->zone_pgdat->node_id;
 	might_sleep_if(!(gfp_mask & __GFP_HARDWALL));
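The net effect of this hunk, as a hedged sketch rather than code from the
patch: with __GFP_THISNODE set, the function returns 1 for every zone, so the
cpuset hierarchy walk and the might_sleep_if() check below it never run, and
the allocation may be satisfied outside the caller's cpuset:

    /* Illustrative caller-side view; 'zone' is hypothetical.  Any zone
     * passes the check once __GFP_THISNODE is in the mask:
     */
    int allowed = cpuset_zone_allowed(zone, GFP_KERNEL | __GFP_THISNODE);
    /* allowed == 1 here, independent of current->cpuset */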
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1290,7 +1290,7 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 	if ((gfp & __GFP_WAIT) && !in_interrupt())
 		cpuset_update_task_memory_state();
-	if (!pol || in_interrupt())
+	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
 		pol = &default_policy;
 	if (pol->policy == MPOL_INTERLEAVE)
 		return alloc_page_interleave(gfp, order, interleave_nodes(pol));
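A hedged illustration of the effect (scenario hypothetical, names as in this
file): a task running under MPOL_INTERLEAVE would normally have its pages
spread across the interleave nodes, but with __GFP_THISNODE the code falls
through to default_policy, so the page comes from the current node's zonelist
instead:

    /* The task's interleave policy is ignored for this call; the
     * allocation behaves as if no mempolicy were set and stays on
     * the local node.
     */
    struct page *page = alloc_pages(GFP_KERNEL | __GFP_THISNODE, 0);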
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -893,6 +893,9 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
 	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
 	 */
 	do {
+		if (unlikely((gfp_mask & __GFP_THISNODE) &&
+			(*z)->zone_pgdat != zonelist->zones[0]->zone_pgdat))
+				break;
 		if ((alloc_flags & ALLOC_CPUSET) &&
 			!cpuset_zone_allowed(*z, gfp_mask))
 				continue;
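For orientation, a conceptual sketch of the resulting walk (simplified from
get_page_from_freelist(), not a verbatim copy): a node's zonelist lists that
node's own zones first, so comparing each zone's pgdat against that of
zones[0] stops the scan at the first zone belonging to a different node:

    struct zone **z;

    for (z = zonelist->zones; *z; z++) {
            if ((gfp_mask & __GFP_THISNODE) &&
                (*z)->zone_pgdat != zonelist->zones[0]->zone_pgdat)
                    break;  /* stop before any remote node's zones */
            /* ... usual cpuset, watermark and buffered_rmqueue() steps ... */
    }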