Commit e5adfffc, authored by Kirill A. Shutemov, committed by Linus Torvalds

mm: use IS_ENABLED(CONFIG_NUMA) instead of NUMA_BUILD

We don't need the custom NUMA_BUILD macro anymore, since we have the handy IS_ENABLED().
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 19965460
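For context before the diff: IS_ENABLED(CONFIG_FOO) is a preprocessor macro from include/linux/kconfig.h that expands to a compile-time constant 1 when the option is enabled and 0 otherwise, so the NUMA-only branches below remain ordinary C `if ()` statements that are still parsed and type-checked on !CONFIG_NUMA builds and then optimized away. The following is a minimal standalone sketch of the underlying trick; it is simplified, the helper macro names and the hard-coded CONFIG_NUMA stub are illustrative, and it is not the exact kernel implementation (the real IS_ENABLED() also accounts for options built as modules).

/*
 * Minimal sketch of the preprocessor trick behind IS_ENABLED()
 * (simplified; helper names are illustrative, not the exact kernel code).
 * Kconfig emits "#define CONFIG_NUMA 1" for built-in options; the macros
 * below map a symbol defined as 1 to 1 and an undefined symbol to 0.
 */
#include <stdio.h>

#define CONFIG_NUMA 1	/* stand-in for what Kconfig would generate */

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)
#define IS_ENABLED(option) __is_defined(option)

int main(void)
{
	/* CONFIG_NUMA is defined as 1, so this expands to a constant 1. */
	if (IS_ENABLED(CONFIG_NUMA))
		printf("NUMA branch compiled in and taken\n");

	/*
	 * An undefined option expands to a constant 0; the branch is still
	 * compiled and type-checked, then removed by the optimizer.
	 */
	if (IS_ENABLED(CONFIG_DOES_NOT_EXIST))
		printf("never reached\n");

	return 0;
}

Compared with the open-coded NUMA_BUILD 0/1 definition this commit removes from kernel.h, the same pattern works for any CONFIG_* symbol without adding a per-option helper macro.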
@@ -266,7 +266,7 @@ static inline enum zone_type gfp_zone(gfp_t flags)
 static inline int gfp_zonelist(gfp_t flags)
 {
-	if (NUMA_BUILD && unlikely(flags & __GFP_THISNODE))
+	if (IS_ENABLED(CONFIG_NUMA) && unlikely(flags & __GFP_THISNODE))
 		return 1;
 	return 0;
......
@@ -687,13 +687,6 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
 /* Trap pasters of __FUNCTION__ at compile-time */
 #define __FUNCTION__ (__func__)
 
-/* This helps us to avoid #ifdef CONFIG_NUMA */
-#ifdef CONFIG_NUMA
-#define NUMA_BUILD 1
-#else
-#define NUMA_BUILD 0
-#endif
-
 /* This helps us avoid #ifdef CONFIG_COMPACTION */
 #ifdef CONFIG_COMPACTION
 #define COMPACTION_BUILD 1
......
@@ -1871,7 +1871,7 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
 	 */
 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
 						high_zoneidx, nodemask) {
-		if (NUMA_BUILD && zlc_active &&
+		if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
 			!zlc_zone_worth_trying(zonelist, z, allowednodes))
 				continue;
 		if ((alloc_flags & ALLOC_CPUSET) &&
@@ -1917,7 +1917,8 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
 				classzone_idx, alloc_flags))
 			goto try_this_zone;
 
-		if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
+		if (IS_ENABLED(CONFIG_NUMA) &&
+			!did_zlc_setup && nr_online_nodes > 1) {
 			/*
 			 * we do zlc_setup if there are multiple nodes
 			 * and before considering the first zone allowed
@@ -1936,7 +1937,7 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
 		 * As we may have just activated ZLC, check if the first
 		 * eligible zone has failed zone_reclaim recently.
 		 */
-		if (NUMA_BUILD && zlc_active &&
+		if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
 			!zlc_zone_worth_trying(zonelist, z, allowednodes))
 				continue;
@@ -1962,11 +1963,11 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
 		if (page)
 			break;
 this_zone_full:
-		if (NUMA_BUILD)
+		if (IS_ENABLED(CONFIG_NUMA))
 			zlc_mark_zone_full(zonelist, z);
 	}
 
-	if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
+	if (unlikely(IS_ENABLED(CONFIG_NUMA) && page == NULL && zlc_active)) {
 		/* Disable zlc cache for second zonelist scan */
 		zlc_active = 0;
 		goto zonelist_scan;
@@ -2266,7 +2267,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 		return NULL;
 
 	/* After successful reclaim, reconsider all zones for allocation */
-	if (NUMA_BUILD)
+	if (IS_ENABLED(CONFIG_NUMA))
 		zlc_clear_zones_full(zonelist);
 
 retry:
@@ -2412,7 +2413,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	 * allowed per node queues are empty and that nodes are
 	 * over allocated.
 	 */
-	if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
+	if (IS_ENABLED(CONFIG_NUMA) &&
+		(gfp_mask & GFP_THISNODE) == GFP_THISNODE)
 		goto nopage;
 
 restart:
@@ -2819,7 +2821,7 @@ unsigned int nr_free_pagecache_pages(void)
 static inline void show_node(struct zone *zone)
 {
-	if (NUMA_BUILD)
+	if (IS_ENABLED(CONFIG_NUMA))
 		printk("Node %d ", zone_to_nid(zone));
 }
......
@@ -2550,7 +2550,7 @@ static void s_stop(struct seq_file *m, void *p)
 static void show_numa_info(struct seq_file *m, struct vm_struct *v)
 {
-	if (NUMA_BUILD) {
+	if (IS_ENABLED(CONFIG_NUMA)) {
 		unsigned int nr, *counters = m->private;
 
 		if (!counters)
@@ -2615,7 +2615,7 @@ static int vmalloc_open(struct inode *inode, struct file *file)
 	unsigned int *ptr = NULL;
 	int ret;
 
-	if (NUMA_BUILD) {
+	if (IS_ENABLED(CONFIG_NUMA)) {
 		ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
 		if (ptr == NULL)
 			return -ENOMEM;
......