Commit 5d1f57e4 authored by Namhyung Kim, committed by Pekka Enberg

slub: Move NUMA-related functions under CONFIG_NUMA

Compile kmem_cache_alloc_node_notrace(), kmalloc_large_node()
and __kmalloc_node_track_caller() only when CONFIG_NUMA is
selected. (A minimal sketch of this #ifdef pattern follows the
commit header below.)
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Parent 3478973d
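To illustrate the build-configuration pattern this commit applies, here is a minimal, self-contained sketch (not kernel code; the names my_kmalloc and my_kmalloc_node are made up for illustration): helpers that only make sense on NUMA systems are grouped under a single #ifdef CONFIG_NUMA block, so a build without -DCONFIG_NUMA does not compile them at all.

#include <stdio.h>
#include <stdlib.h>

/* Common allocation path: always built. */
static void *my_kmalloc(size_t size)
{
	return malloc(size);
}

#ifdef CONFIG_NUMA
/*
 * Node-aware path: compiled only when CONFIG_NUMA is defined
 * (e.g. cc -DCONFIG_NUMA sketch.c). In the kernel the equivalent
 * helpers would allocate from a specific NUMA node; here the node
 * argument is only printed to keep the example self-contained.
 */
static void *my_kmalloc_node(size_t size, int node)
{
	printf("allocating %zu bytes on node %d\n", size, node);
	return malloc(size);
}
#endif

int main(void)
{
	void *p = my_kmalloc(64);
#ifdef CONFIG_NUMA
	void *q = my_kmalloc_node(64, 0);
	free(q);
#else
	puts("built without CONFIG_NUMA: node-aware helper is not compiled in");
#endif
	free(p);
	return 0;
}

The diff below does exactly this to mm/slub.c: it moves the existing #ifdef CONFIG_NUMA / #endif markers so that kmem_cache_alloc_node_notrace(), kmalloc_large_node() and __kmalloc_node_track_caller() end up inside such a block.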
@@ -1792,7 +1792,6 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
-#endif
 
 #ifdef CONFIG_TRACING
 void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
@@ -1803,6 +1802,7 @@ void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
 #endif
+#endif
 
 /*
  * Slow patch handling. This may still be called frequently since objects
@@ -2673,6 +2673,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 }
 EXPORT_SYMBOL(__kmalloc);
 
+#ifdef CONFIG_NUMA
 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
 	struct page *page;
@@ -2687,7 +2688,6 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 	return ptr;
 }
 
-#ifdef CONFIG_NUMA
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *s;
@@ -3342,6 +3342,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	return ret;
 }
 
+#ifdef CONFIG_NUMA
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 					int node, unsigned long caller)
 {
@@ -3370,6 +3371,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	return ret;
 }
+#endif
 
 #ifdef CONFIG_SLUB_DEBUG
 static int count_inuse(struct page *page)