提交 de2d9445 编写于 作者: Tejun Heo 提交者: Ingo Molnar

x86: Unify node_to_cpumask_map handling between 32 and 64bit

x86_32 has been managing node_to_cpumask_map explicitly from
map_cpu_to_node() and friends in a rather ugly way.  With
previous changes, it's now possible to share the code with
64bit.

* When CONFIG_NUMA_EMU is disabled, numa_add/remove_cpu() are
  implemented in numa.c and shared by 32 and 64bit.  CONFIG_NUMA_EMU
  versions still live in numa_64.c.

  NUMA_EMU's dependency on 64bit is planned to be removed and the
  above should go away together.

* identify_cpu() now calls numa_add_cpu() for 32bit too.  This
  makes the explicit mask management from map_cpu_to_node() unnecessary.

* The whole x86_32 specific map_cpu_to_node() chunk is no longer
  necessary.  Dropped.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Pekka Enberg <penberg@kernel.org>
Cc: eric.dumazet@gmail.com
Cc: yinghai@kernel.org
Cc: brgerst@gmail.com
Cc: gorcunov@gmail.com
Cc: shaohui.zheng@intel.com
Cc: rientjes@google.com
LKML-Reference: <1295789862-25482-16-git-send-email-tj@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: David Rientjes <rientjes@google.com>
Cc: Shaohui Zheng <shaohui.zheng@intel.com>
上级 645a7919
#ifndef _ASM_X86_NUMA_H #ifndef _ASM_X86_NUMA_H
#define _ASM_X86_NUMA_H #define _ASM_X86_NUMA_H
#include <asm/topology.h>
#include <asm/apicdef.h> #include <asm/apicdef.h>
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
...@@ -33,9 +34,17 @@ static inline void set_apicid_to_node(int apicid, s16 node) ...@@ -33,9 +34,17 @@ static inline void set_apicid_to_node(int apicid, s16 node)
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
extern void __cpuinit numa_set_node(int cpu, int node); extern void __cpuinit numa_set_node(int cpu, int node);
extern void __cpuinit numa_clear_node(int cpu); extern void __cpuinit numa_clear_node(int cpu);
extern void __cpuinit numa_add_cpu(int cpu);
extern void __cpuinit numa_remove_cpu(int cpu);
#else /* CONFIG_NUMA */ #else /* CONFIG_NUMA */
static inline void numa_set_node(int cpu, int node) { } static inline void numa_set_node(int cpu, int node) { }
static inline void numa_clear_node(int cpu) { } static inline void numa_clear_node(int cpu) { }
static inline void numa_add_cpu(int cpu) { }
static inline void numa_remove_cpu(int cpu) { }
#endif /* CONFIG_NUMA */ #endif /* CONFIG_NUMA */
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable);
#endif
#endif /* _ASM_X86_NUMA_H */ #endif /* _ASM_X86_NUMA_H */
...@@ -4,7 +4,6 @@ ...@@ -4,7 +4,6 @@
extern int numa_off; extern int numa_off;
extern int pxm_to_nid(int pxm); extern int pxm_to_nid(int pxm);
extern void numa_remove_cpu(int cpu);
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
extern int __cpuinit numa_cpu_node(int apicid); extern int __cpuinit numa_cpu_node(int apicid);
......
...@@ -30,8 +30,6 @@ extern void setup_node_bootmem(int nodeid, unsigned long start, ...@@ -30,8 +30,6 @@ extern void setup_node_bootmem(int nodeid, unsigned long start,
extern void __init init_cpu_to_node(void); extern void __init init_cpu_to_node(void);
extern int __cpuinit numa_cpu_node(int cpu); extern int __cpuinit numa_cpu_node(int cpu);
extern void __cpuinit numa_add_cpu(int cpu);
extern void __cpuinit numa_remove_cpu(int cpu);
#ifdef CONFIG_NUMA_EMU #ifdef CONFIG_NUMA_EMU
#define FAKE_NODE_MIN_SIZE ((u64)32 << 20) #define FAKE_NODE_MIN_SIZE ((u64)32 << 20)
...@@ -41,8 +39,6 @@ void numa_emu_cmdline(char *); ...@@ -41,8 +39,6 @@ void numa_emu_cmdline(char *);
#else #else
static inline void init_cpu_to_node(void) { } static inline void init_cpu_to_node(void) { }
static inline int numa_cpu_node(int cpu) { return NUMA_NO_NODE; } static inline int numa_cpu_node(int cpu) { return NUMA_NO_NODE; }
static inline void numa_add_cpu(int cpu, int node) { }
static inline void numa_remove_cpu(int cpu) { }
#endif #endif
#endif /* _ASM_X86_NUMA_64_H */ #endif /* _ASM_X86_NUMA_64_H */
...@@ -869,7 +869,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) ...@@ -869,7 +869,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
select_idle_routine(c); select_idle_routine(c);
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) #ifdef CONFIG_NUMA
numa_add_cpu(smp_processor_id()); numa_add_cpu(smp_processor_id());
#endif #endif
} }
......
...@@ -132,49 +132,6 @@ EXPORT_PER_CPU_SYMBOL(cpu_info); ...@@ -132,49 +132,6 @@ EXPORT_PER_CPU_SYMBOL(cpu_info);
atomic_t init_deasserted; atomic_t init_deasserted;
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)
/* set up a mapping between cpu and node. */
static void map_cpu_to_node(int cpu, int node)
{
printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node);
cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}
/* undo a mapping between cpu and node. */
static void unmap_cpu_to_node(int cpu)
{
int node;
printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu);
for (node = 0; node < MAX_NUMNODES; node++)
cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
}
#else /* !(CONFIG_NUMA && CONFIG_X86_32) */
#define map_cpu_to_node(cpu, node) ({})
#define unmap_cpu_to_node(cpu) ({})
#endif
#ifdef CONFIG_X86_32
static void map_cpu_to_logical_apicid(void)
{
int cpu = smp_processor_id();
int node;
node = numa_cpu_node(cpu);
if (!node_online(node))
node = first_online_node;
map_cpu_to_node(cpu, node);
}
void numa_remove_cpu(int cpu)
{
unmap_cpu_to_node(cpu);
}
#else
#define map_cpu_to_logical_apicid() do {} while (0)
#endif
/* /*
* Report back to the Boot Processor. * Report back to the Boot Processor.
* Running on AP. * Running on AP.
...@@ -242,7 +199,6 @@ static void __cpuinit smp_callin(void) ...@@ -242,7 +199,6 @@ static void __cpuinit smp_callin(void)
apic->smp_callin_clear_local_apic(); apic->smp_callin_clear_local_apic();
setup_local_APIC(); setup_local_APIC();
end_local_APIC_setup(); end_local_APIC_setup();
map_cpu_to_logical_apicid();
/* /*
* Need to setup vector mappings before we enable interrupts. * Need to setup vector mappings before we enable interrupts.
...@@ -943,7 +899,6 @@ static __init void disable_smp(void) ...@@ -943,7 +899,6 @@ static __init void disable_smp(void)
physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map); physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
else else
physid_set_mask_of_physid(0, &phys_cpu_present_map); physid_set_mask_of_physid(0, &phys_cpu_present_map);
map_cpu_to_logical_apicid();
cpumask_set_cpu(0, cpu_sibling_mask(0)); cpumask_set_cpu(0, cpu_sibling_mask(0));
cpumask_set_cpu(0, cpu_core_mask(0)); cpumask_set_cpu(0, cpu_core_mask(0));
} }
...@@ -1120,8 +1075,6 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) ...@@ -1120,8 +1075,6 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
end_local_APIC_setup(); end_local_APIC_setup();
map_cpu_to_logical_apicid();
if (apic->setup_portio_remap) if (apic->setup_portio_remap)
apic->setup_portio_remap(); apic->setup_portio_remap();
......
...@@ -99,7 +99,21 @@ void __init setup_node_to_cpumask_map(void) ...@@ -99,7 +99,21 @@ void __init setup_node_to_cpumask_map(void)
pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids); pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
} }
#ifdef CONFIG_DEBUG_PER_CPU_MAPS #ifndef CONFIG_DEBUG_PER_CPU_MAPS
# ifndef CONFIG_NUMA_EMU
void __cpuinit numa_add_cpu(int cpu)
{
cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
void __cpuinit numa_remove_cpu(int cpu)
{
cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
# endif /* !CONFIG_NUMA_EMU */
#else /* !CONFIG_DEBUG_PER_CPU_MAPS */
int __cpu_to_node(int cpu) int __cpu_to_node(int cpu)
{ {
...@@ -131,6 +145,52 @@ int early_cpu_to_node(int cpu) ...@@ -131,6 +145,52 @@ int early_cpu_to_node(int cpu)
return per_cpu(x86_cpu_to_node_map, cpu); return per_cpu(x86_cpu_to_node_map, cpu);
} }
struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable)
{
int node = early_cpu_to_node(cpu);
struct cpumask *mask;
char buf[64];
mask = node_to_cpumask_map[node];
if (!mask) {
pr_err("node_to_cpumask_map[%i] NULL\n", node);
dump_stack();
return NULL;
}
cpulist_scnprintf(buf, sizeof(buf), mask);
printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
enable ? "numa_add_cpu" : "numa_remove_cpu",
cpu, node, buf);
return mask;
}
# ifndef CONFIG_NUMA_EMU
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
struct cpumask *mask;
mask = debug_cpumask_set_cpu(cpu, enable);
if (!mask)
return;
if (enable)
cpumask_set_cpu(cpu, mask);
else
cpumask_clear_cpu(cpu, mask);
}
void __cpuinit numa_add_cpu(int cpu)
{
numa_set_cpumask(cpu, 1);
}
void __cpuinit numa_remove_cpu(int cpu)
{
numa_set_cpumask(cpu, 0);
}
# endif /* !CONFIG_NUMA_EMU */
/* /*
* Returns a pointer to the bitmask of CPUs on Node 'node'. * Returns a pointer to the bitmask of CPUs on Node 'node'.
*/ */
...@@ -154,4 +214,4 @@ const struct cpumask *cpumask_of_node(int node) ...@@ -154,4 +214,4 @@ const struct cpumask *cpumask_of_node(int node)
} }
EXPORT_SYMBOL(cpumask_of_node); EXPORT_SYMBOL(cpumask_of_node);
#endif /* CONFIG_DEBUG_PER_CPU_MAPS */ #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
...@@ -726,19 +726,18 @@ int __cpuinit numa_cpu_node(int cpu) ...@@ -726,19 +726,18 @@ int __cpuinit numa_cpu_node(int cpu)
return NUMA_NO_NODE; return NUMA_NO_NODE;
} }
#ifndef CONFIG_DEBUG_PER_CPU_MAPS /*
* UGLINESS AHEAD: Currently, CONFIG_NUMA_EMU is 64bit only and makes use
#ifndef CONFIG_NUMA_EMU * of 64bit specific data structures. The distinction is artificial and
void __cpuinit numa_add_cpu(int cpu) * should be removed. numa_{add|remove}_cpu() are implemented in numa.c
{ * for both 32 and 64bit when CONFIG_NUMA_EMU is disabled but here when
cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); * enabled.
} *
* NUMA emulation is planned to be made generic and the following and other
void __cpuinit numa_remove_cpu(int cpu) * related code should be moved to numa.c.
{ */
cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); #ifdef CONFIG_NUMA_EMU
} # ifndef CONFIG_DEBUG_PER_CPU_MAPS
#else
void __cpuinit numa_add_cpu(int cpu) void __cpuinit numa_add_cpu(int cpu)
{ {
unsigned long addr; unsigned long addr;
...@@ -778,47 +777,7 @@ void __cpuinit numa_remove_cpu(int cpu) ...@@ -778,47 +777,7 @@ void __cpuinit numa_remove_cpu(int cpu)
for_each_online_node(i) for_each_online_node(i)
cpumask_clear_cpu(cpu, node_to_cpumask_map[i]); cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
} }
#endif /* !CONFIG_NUMA_EMU */ # else /* !CONFIG_DEBUG_PER_CPU_MAPS */
#else /* CONFIG_DEBUG_PER_CPU_MAPS */
static struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable)
{
int node = early_cpu_to_node(cpu);
struct cpumask *mask;
char buf[64];
mask = node_to_cpumask_map[node];
if (!mask) {
pr_err("node_to_cpumask_map[%i] NULL\n", node);
dump_stack();
return NULL;
}
cpulist_scnprintf(buf, sizeof(buf), mask);
printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
enable ? "numa_add_cpu" : "numa_remove_cpu",
cpu, node, buf);
return mask;
}
/*
* --------- debug versions of the numa functions ---------
*/
#ifndef CONFIG_NUMA_EMU
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
struct cpumask *mask;
mask = debug_cpumask_set_cpu(cpu, enable);
if (!mask)
return;
if (enable)
cpumask_set_cpu(cpu, mask);
else
cpumask_clear_cpu(cpu, mask);
}
#else
static void __cpuinit numa_set_cpumask(int cpu, int enable) static void __cpuinit numa_set_cpumask(int cpu, int enable)
{ {
int node = early_cpu_to_node(cpu); int node = early_cpu_to_node(cpu);
...@@ -842,7 +801,6 @@ static void __cpuinit numa_set_cpumask(int cpu, int enable) ...@@ -842,7 +801,6 @@ static void __cpuinit numa_set_cpumask(int cpu, int enable)
cpumask_clear_cpu(cpu, mask); cpumask_clear_cpu(cpu, mask);
} }
} }
#endif /* CONFIG_NUMA_EMU */
void __cpuinit numa_add_cpu(int cpu) void __cpuinit numa_add_cpu(int cpu)
{ {
...@@ -853,8 +811,5 @@ void __cpuinit numa_remove_cpu(int cpu) ...@@ -853,8 +811,5 @@ void __cpuinit numa_remove_cpu(int cpu)
{ {
numa_set_cpumask(cpu, 0); numa_set_cpumask(cpu, 0);
} }
/* # endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
* --------- end of debug versions of the numa functions --------- #endif /* CONFIG_NUMA_EMU */
*/
#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册