提交 cc1ba8ea 编写于 作者: Anton Blanchard 提交者: Benjamin Herrenschmidt

powerpc/cpumask: Dynamically allocate cpu_sibling_map and cpu_core_map cpumasks

Dynamically allocate cpu_sibling_map and cpu_core_map cpumasks.

We don't need to set_cpu_online() the boot cpu in smp_prepare_boot_cpu,
init/main.c does it for us.

We also postpone setting of the boot cpu in cpu_sibling_map and cpu_core_map
until when the memory allocator is available (smp_prepare_cpus), similar
to x86.
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
上级 e6532c63
...@@ -68,8 +68,19 @@ static inline void set_hard_smp_processor_id(int cpu, int phys) ...@@ -68,8 +68,19 @@ static inline void set_hard_smp_processor_id(int cpu, int phys)
} }
#endif #endif
DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DECLARE_PER_CPU(cpumask_t, cpu_core_map); DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
static inline struct cpumask *cpu_sibling_mask(int cpu)
{
return per_cpu(cpu_sibling_map, cpu);
}
static inline struct cpumask *cpu_core_mask(int cpu)
{
return per_cpu(cpu_core_map, cpu);
}
extern int cpu_to_core_id(int cpu); extern int cpu_to_core_id(int cpu);
/* Since OpenPIC has only 4 IPIs, we use slightly different message numbers. /* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
...@@ -93,7 +104,6 @@ void smp_init_pSeries(void); ...@@ -93,7 +104,6 @@ void smp_init_pSeries(void);
void smp_init_cell(void); void smp_init_cell(void);
void smp_init_celleb(void); void smp_init_celleb(void);
void smp_setup_cpu_maps(void); void smp_setup_cpu_maps(void);
void smp_setup_cpu_sibling_map(void);
extern int __cpu_disable(void); extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu); extern void __cpu_die(unsigned int cpu);
......
...@@ -112,8 +112,8 @@ static inline void sysfs_remove_device_from_node(struct sys_device *dev, ...@@ -112,8 +112,8 @@ static inline void sysfs_remove_device_from_node(struct sys_device *dev,
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
#include <asm/smp.h> #include <asm/smp.h>
#define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu)) #define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
#define topology_core_cpumask(cpu) (&per_cpu(cpu_core_map, cpu)) #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_core_id(cpu) (cpu_to_core_id(cpu)) #define topology_core_id(cpu) (cpu_to_core_id(cpu))
#endif #endif
#endif #endif
......
...@@ -59,8 +59,8 @@ ...@@ -59,8 +59,8 @@
struct thread_info *secondary_ti; struct thread_info *secondary_ti;
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE; DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE; DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map); EXPORT_PER_CPU_SYMBOL(cpu_core_map);
...@@ -271,6 +271,16 @@ void __init smp_prepare_cpus(unsigned int max_cpus) ...@@ -271,6 +271,16 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
smp_store_cpu_info(boot_cpuid); smp_store_cpu_info(boot_cpuid);
cpu_callin_map[boot_cpuid] = 1; cpu_callin_map[boot_cpuid] = 1;
for_each_possible_cpu(cpu) {
zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
GFP_KERNEL, cpu_to_node(cpu));
zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
GFP_KERNEL, cpu_to_node(cpu));
}
cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
if (smp_ops) if (smp_ops)
if (smp_ops->probe) if (smp_ops->probe)
max_cpus = smp_ops->probe(); max_cpus = smp_ops->probe();
...@@ -289,10 +299,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus) ...@@ -289,10 +299,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
void __devinit smp_prepare_boot_cpu(void) void __devinit smp_prepare_boot_cpu(void)
{ {
BUG_ON(smp_processor_id() != boot_cpuid); BUG_ON(smp_processor_id() != boot_cpuid);
set_cpu_online(boot_cpuid, true);
cpu_set(boot_cpuid, per_cpu(cpu_sibling_map, boot_cpuid));
cpu_set(boot_cpuid, per_cpu(cpu_core_map, boot_cpuid));
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
paca[boot_cpuid].__current = current; paca[boot_cpuid].__current = current;
#endif #endif
...@@ -525,15 +531,15 @@ int __devinit start_secondary(void *unused) ...@@ -525,15 +531,15 @@ int __devinit start_secondary(void *unused)
for (i = 0; i < threads_per_core; i++) { for (i = 0; i < threads_per_core; i++) {
if (cpu_is_offline(base + i)) if (cpu_is_offline(base + i))
continue; continue;
cpu_set(cpu, per_cpu(cpu_sibling_map, base + i)); cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
cpu_set(base + i, per_cpu(cpu_sibling_map, cpu)); cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));
/* cpu_core_map should be a superset of /* cpu_core_map should be a superset of
* cpu_sibling_map even if we don't have cache * cpu_sibling_map even if we don't have cache
* information, so update the former here, too. * information, so update the former here, too.
*/ */
cpu_set(cpu, per_cpu(cpu_core_map, base +i)); cpumask_set_cpu(cpu, cpu_core_mask(base + i));
cpu_set(base + i, per_cpu(cpu_core_map, cpu)); cpumask_set_cpu(base + i, cpu_core_mask(cpu));
} }
l2_cache = cpu_to_l2cache(cpu); l2_cache = cpu_to_l2cache(cpu);
for_each_online_cpu(i) { for_each_online_cpu(i) {
...@@ -541,8 +547,8 @@ int __devinit start_secondary(void *unused) ...@@ -541,8 +547,8 @@ int __devinit start_secondary(void *unused)
if (!np) if (!np)
continue; continue;
if (np == l2_cache) { if (np == l2_cache) {
cpu_set(cpu, per_cpu(cpu_core_map, i)); cpumask_set_cpu(cpu, cpu_core_mask(i));
cpu_set(i, per_cpu(cpu_core_map, cpu)); cpumask_set_cpu(i, cpu_core_mask(cpu));
} }
of_node_put(np); of_node_put(np);
} }
...@@ -602,10 +608,10 @@ int __cpu_disable(void) ...@@ -602,10 +608,10 @@ int __cpu_disable(void)
/* Update sibling maps */ /* Update sibling maps */
base = cpu_first_thread_in_core(cpu); base = cpu_first_thread_in_core(cpu);
for (i = 0; i < threads_per_core; i++) { for (i = 0; i < threads_per_core; i++) {
cpu_clear(cpu, per_cpu(cpu_sibling_map, base + i)); cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
cpu_clear(base + i, per_cpu(cpu_sibling_map, cpu)); cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
cpu_clear(cpu, per_cpu(cpu_core_map, base +i)); cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
cpu_clear(base + i, per_cpu(cpu_core_map, cpu)); cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
} }
l2_cache = cpu_to_l2cache(cpu); l2_cache = cpu_to_l2cache(cpu);
...@@ -614,8 +620,8 @@ int __cpu_disable(void) ...@@ -614,8 +620,8 @@ int __cpu_disable(void)
if (!np) if (!np)
continue; continue;
if (np == l2_cache) { if (np == l2_cache) {
cpu_clear(cpu, per_cpu(cpu_core_map, i)); cpumask_clear_cpu(cpu, cpu_core_mask(i));
cpu_clear(i, per_cpu(cpu_core_map, cpu)); cpumask_clear_cpu(i, cpu_core_mask(cpu));
} }
of_node_put(np); of_node_put(np);
} }
......
...@@ -118,7 +118,7 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy) ...@@ -118,7 +118,7 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
policy->cur = cbe_freqs[cur_pmode].frequency; policy->cur = cbe_freqs[cur_pmode].frequency;
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu)); cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
#endif #endif
cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu); cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册