提交 b6278470 编写于 作者: Mike Travis 提交者: Thomas Gleixner

x86: convert cpu_llc_id to be a per cpu variable

Convert cpu_llc_id from a static array sized by NR_CPUS to a per_cpu
variable. This saves sizeof(cpu_llc_id) * NR unused cpus.  Access is
mostly from startup and CPU HOTPLUG functions.

Note there's an additional change of the type of cpu_llc_id from int to
u8 for ARCH i386 to correspond with the same type in ARCH x86_64.
Signed-off-by: Mike Travis <travis@sgi.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: "Siddha, Suresh B" <suresh.b.siddha@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
上级 71fff5e6
...@@ -417,14 +417,14 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) ...@@ -417,14 +417,14 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
if (new_l2) { if (new_l2) {
l2 = new_l2; l2 = new_l2;
#ifdef CONFIG_X86_HT #ifdef CONFIG_X86_HT
cpu_llc_id[cpu] = l2_id; per_cpu(cpu_llc_id, cpu) = l2_id;
#endif #endif
} }
if (new_l3) { if (new_l3) {
l3 = new_l3; l3 = new_l3;
#ifdef CONFIG_X86_HT #ifdef CONFIG_X86_HT
cpu_llc_id[cpu] = l3_id; per_cpu(cpu_llc_id, cpu) = l3_id;
#endif #endif
} }
......
...@@ -67,7 +67,7 @@ int smp_num_siblings = 1; ...@@ -67,7 +67,7 @@ int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings); EXPORT_SYMBOL(smp_num_siblings);
/* Last level cache ID of each logical CPU */ /* Last level cache ID of each logical CPU */
int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID}; DEFINE_PER_CPU(u8, cpu_llc_id) = BAD_APICID;
/* representing HT siblings of each logical CPU */ /* representing HT siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map); DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
...@@ -348,8 +348,8 @@ void __cpuinit set_cpu_sibling_map(int cpu) ...@@ -348,8 +348,8 @@ void __cpuinit set_cpu_sibling_map(int cpu)
} }
for_each_cpu_mask(i, cpu_sibling_setup_map) { for_each_cpu_mask(i, cpu_sibling_setup_map) {
if (cpu_llc_id[cpu] != BAD_APICID && if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
cpu_llc_id[cpu] == cpu_llc_id[i]) { per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
cpu_set(i, c[cpu].llc_shared_map); cpu_set(i, c[cpu].llc_shared_map);
cpu_set(cpu, c[i].llc_shared_map); cpu_set(cpu, c[i].llc_shared_map);
} }
......
...@@ -65,7 +65,7 @@ int smp_num_siblings = 1; ...@@ -65,7 +65,7 @@ int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings); EXPORT_SYMBOL(smp_num_siblings);
/* Last level cache ID of each logical CPU */ /* Last level cache ID of each logical CPU */
u8 cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID}; DEFINE_PER_CPU(u8, cpu_llc_id) = BAD_APICID;
/* Bitmask of currently online CPUs */ /* Bitmask of currently online CPUs */
cpumask_t cpu_online_map __read_mostly; cpumask_t cpu_online_map __read_mostly;
...@@ -283,8 +283,8 @@ static inline void set_cpu_sibling_map(int cpu) ...@@ -283,8 +283,8 @@ static inline void set_cpu_sibling_map(int cpu)
} }
for_each_cpu_mask(i, cpu_sibling_setup_map) { for_each_cpu_mask(i, cpu_sibling_setup_map) {
if (cpu_llc_id[cpu] != BAD_APICID && if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
cpu_llc_id[cpu] == cpu_llc_id[i]) { per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
cpu_set(i, c[cpu].llc_shared_map); cpu_set(i, c[cpu].llc_shared_map);
cpu_set(cpu, c[i].llc_shared_map); cpu_set(cpu, c[i].llc_shared_map);
} }
......
...@@ -110,7 +110,11 @@ extern struct cpuinfo_x86 cpu_data[]; ...@@ -110,7 +110,11 @@ extern struct cpuinfo_x86 cpu_data[];
#define current_cpu_data boot_cpu_data #define current_cpu_data boot_cpu_data
#endif #endif
extern int cpu_llc_id[NR_CPUS]; /*
* the following now lives in the per cpu area:
* extern int cpu_llc_id[NR_CPUS];
*/
DECLARE_PER_CPU(u8, cpu_llc_id);
extern char ignore_fpu_irq; extern char ignore_fpu_irq;
void __init cpu_detect(struct cpuinfo_x86 *c); void __init cpu_detect(struct cpuinfo_x86 *c);
......
...@@ -49,7 +49,7 @@ extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), ...@@ -49,7 +49,7 @@ extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
*/ */
DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
DECLARE_PER_CPU(cpumask_t, cpu_core_map); DECLARE_PER_CPU(cpumask_t, cpu_core_map);
extern u8 cpu_llc_id[NR_CPUS]; DECLARE_PER_CPU(u8, cpu_llc_id);
#define SMP_TRAMPOLINE_BASE 0x6000 #define SMP_TRAMPOLINE_BASE 0x6000
...@@ -121,6 +121,7 @@ static __inline int logical_smp_processor_id(void) ...@@ -121,6 +121,7 @@ static __inline int logical_smp_processor_id(void)
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
#else #else
extern unsigned int boot_cpu_id;
#define cpu_physical_id(cpu) boot_cpu_id #define cpu_physical_id(cpu) boot_cpu_id
#endif /* !CONFIG_SMP */ #endif /* !CONFIG_SMP */
#endif #endif
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册