Commit 7877cdcc, authored by Michal Hocko, committed by Linus Torvalds

mm: consolidate warn_alloc_failed users

warn_alloc_failed is currently used from both the page allocator and the
vmalloc allocator.  Sharing the code is good, except that vmalloc would
benefit from a slightly different warning message.  This is already
handled by the fmt parameter, except that

  "%s: page allocation failure: order:%u, mode:%#x(%pGg)"

is printed anyway.  This can be quite misleading, because the warning
may have been triggered by a vmalloc failure, in which case the page
allocator is not the culprit.  Fix this by always using the fmt string
and only printing the information that makes sense for the particular
caller (e.g. order makes very little sense in the vmalloc context).
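
To make the consequence concrete, here is an illustrative before/after
(the task name, size and gfp mask are invented for the example, not
taken from a real log):

  Before: kworker/0:1: page allocation failure: order:0, mode:0x24000c2(GFP_KERNEL|__GFP_HIGHMEM)
  After:  kworker/0:1: vmalloc: allocation failure: 1048576 bytes, mode:0x24000c2(GFP_KERNEL|__GFP_HIGHMEM)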

Rename the function so that any caller that is not converted fails to
build (no user is silently missed), and also because a later patch will
reuse it for !failure cases as well.

Link: http://lkml.kernel.org/r/20160929084407.7004-2-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Dave Hansen <dave.hansen@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent c2a9737f
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1916,9 +1916,8 @@ extern void si_meminfo_node(struct sysinfo *val, int nid);
 extern unsigned long arch_reserved_kernel_pages(void);
 #endif
 
-extern __printf(3, 4)
-void warn_alloc_failed(gfp_t gfp_mask, unsigned int order,
-		const char *fmt, ...);
+extern __printf(2, 3)
+void warn_alloc(gfp_t gfp_mask, const char *fmt, ...);
 
 extern void setup_per_cpu_pageset(void);
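
The __printf() indices change because dropping the order parameter moves
fmt from position 3 to position 2.  As a reminder of what the annotation
does, here is a simplified sketch of the kernel's macro (gfp_t replaced
by unsigned int so the snippet stands alone):

	/* __printf(a, b): argument a is a printf-style format string,
	 * argument b is the first vararg checked against it. */
	#define __printf(a, b) __attribute__((format(printf, a, b)))

	__printf(3, 4)	/* old: fmt is parameter 3, varargs start at 4 */
	void warn_alloc_failed(unsigned int gfp_mask, unsigned int order,
			       const char *fmt, ...);

	__printf(2, 3)	/* new: fmt is parameter 2, varargs start at 3 */
	void warn_alloc(unsigned int gfp_mask, const char *fmt, ...);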
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2979,9 +2979,11 @@ static DEFINE_RATELIMIT_STATE(nopage_rs,
 		DEFAULT_RATELIMIT_INTERVAL,
 		DEFAULT_RATELIMIT_BURST);
 
-void warn_alloc_failed(gfp_t gfp_mask, unsigned int order, const char *fmt, ...)
+void warn_alloc(gfp_t gfp_mask, const char *fmt, ...)
 {
 	unsigned int filter = SHOW_MEM_FILTER_NODES;
+	struct va_format vaf;
+	va_list args;
 
 	if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
 	    debug_guardpage_minorder() > 0)
@@ -2999,22 +3001,16 @@ void warn_alloc_failed(gfp_t gfp_mask, unsigned int order, const char *fmt, ...
 	if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
 		filter &= ~SHOW_MEM_FILTER_NODES;
 
-	if (fmt) {
-		struct va_format vaf;
-		va_list args;
+	pr_warn("%s: ", current->comm);
 
-		va_start(args, fmt);
+	va_start(args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &args;
+	pr_cont("%pV", &vaf);
+	va_end(args);
 
-		vaf.fmt = fmt;
-		vaf.va = &args;
+	pr_cont(", mode:%#x(%pGg)\n", gfp_mask, &gfp_mask);
 
-		pr_warn("%pV", &vaf);
-
-		va_end(args);
-	}
-
-	pr_warn("%s: page allocation failure: order:%u, mode:%#x(%pGg)\n",
-		current->comm, order, gfp_mask, &gfp_mask);
 	dump_stack();
 	if (!should_suppress_show_mem())
 		show_mem(filter);
@@ -3680,7 +3676,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	}
 
 nopage:
-	warn_alloc_failed(gfp_mask, order, NULL);
+	warn_alloc(gfp_mask,
+			"page allocation failure: order:%u", order);
 got_pg:
 	return page;
 }
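
The new body leans on the kernel's %pV printk extension: struct
va_format bundles a format string with a captured va_list, so a
caller-supplied format can be spliced into the middle of a larger
message between pr_warn() and pr_cont().  A minimal userspace analogue
of the same forwarding pattern, with invented names (warn_alloc_demo,
the fixed "demo-task" string) and vfprintf() standing in for
pr_cont("%pV", ...), since plain stdio can consume a va_list directly:

	#include <stdarg.h>
	#include <stdio.h>

	/* Print "<task>: <caller fmt>, mode:<mask>\n" in three steps,
	 * mirroring the pr_warn()/pr_cont() sequence in warn_alloc(). */
	static void warn_alloc_demo(unsigned int gfp_mask, const char *fmt, ...)
	{
		va_list args;

		fprintf(stderr, "%s: ", "demo-task");	/* pr_warn("%s: ", current->comm); */

		va_start(args, fmt);
		vfprintf(stderr, fmt, args);		/* pr_cont("%pV", &vaf); */
		va_end(args);

		fprintf(stderr, ", mode:%#x\n", gfp_mask); /* pr_cont(", mode:%#x(%pGg)", ...); */
	}

	int main(void)
	{
		warn_alloc_demo(0x24000c0,
				"vmalloc: allocation failure: %lu bytes",
				(unsigned long)1048576);
		return 0;
	}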
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1601,7 +1601,6 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 				 pgprot_t prot, int node)
 {
-	const int order = 0;
 	struct page **pages;
 	unsigned int nr_pages, array_size, i;
 	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
@@ -1629,9 +1628,9 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 		struct page *page;
 
 		if (node == NUMA_NO_NODE)
-			page = alloc_pages(alloc_mask, order);
+			page = alloc_page(alloc_mask);
 		else
-			page = alloc_pages_node(node, alloc_mask, order);
+			page = alloc_pages_node(node, alloc_mask, 0);
 
 		if (unlikely(!page)) {
 			/* Successfully allocated i pages, free them in __vunmap() */
@@ -1648,8 +1647,8 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	return area->addr;
 
 fail:
-	warn_alloc_failed(gfp_mask, order,
-			  "vmalloc: allocation failure, allocated %ld of %ld bytes\n",
+	warn_alloc(gfp_mask,
+			  "vmalloc: allocation failure, allocated %ld of %ld bytes",
 			  (area->nr_pages*PAGE_SIZE), area->size);
 	vfree(area->addr);
 	return NULL;
@@ -1710,9 +1709,8 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 	return addr;
 
 fail:
-	warn_alloc_failed(gfp_mask, 0,
-			  "vmalloc: allocation failure: %lu bytes\n",
-			  real_size);
+	warn_alloc(gfp_mask,
+			"vmalloc: allocation failure: %lu bytes", real_size);
 	return NULL;
 }
 
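A note on the alloc_pages() -> alloc_page() switch above: with the
always-zero order constant gone, the single-page allocation is spelled
directly.  alloc_page() is simply the order-0 shorthand; the kernel
defines it along these lines (simplified from include/linux/gfp.h):

	/* alloc_page(mask): allocate exactly one page (order 0). */
	#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)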