diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h
index 21472cea3d6cb39f6d8f6da392434d5e629b94c2..57b3d86dd9edb5ca3d505522ae2296b941a9bace 100644
--- a/include/asm-x86/smp.h
+++ b/include/asm-x86/smp.h
@@ -126,8 +126,39 @@ extern unsigned long setup_trampoline(void);
 void smp_store_cpu_info(int id);
 
 #define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)
-#else
+
+/* We don't mark CPUs online until __cpu_up(), so we need another measure */
+static inline int num_booting_cpus(void)
+{
+	return cpus_weight(cpu_callout_map);
+}
+#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_X86_32_SMP
+/*
+ * This function is needed by all SMP systems. It must _always_ be valid
+ * from the initial startup. We map APIC_BASE very early in page_setup(),
+ * so this is correct in the x86 case.
+ */
+DECLARE_PER_CPU(int, cpu_number);
+#define raw_smp_processor_id() (x86_read_percpu(cpu_number))
+extern int safe_smp_processor_id(void);
+
+#elif defined(CONFIG_X86_64_SMP)
+#define raw_smp_processor_id()	read_pda(cpunumber)
+
+#define stack_smp_processor_id()					\
+({									\
+	struct thread_info *ti;						\
+	__asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK));	\
+	ti->cpu;							\
+})
+#define safe_smp_processor_id()	smp_processor_id()
+
+#else /* !CONFIG_X86_32_SMP && !CONFIG_X86_64_SMP */
 #define cpu_physical_id(cpu)	boot_cpu_physical_apicid
+#define safe_smp_processor_id()	0
+#define stack_smp_processor_id()	0
 #endif
 
 #ifdef CONFIG_X86_32
diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h
index 694d3245a88f1d5aac92df9490449022d241d930..d9ae5ac93dfc70d39373eae9f252811e432d12e5 100644
--- a/include/asm-x86/smp_32.h
+++ b/include/asm-x86/smp_32.h
@@ -8,26 +8,5 @@ extern cpumask_t cpu_callin_map;
 
 extern void (*mtrr_hook)(void);
 extern void zap_low_mappings(void);
-#ifdef CONFIG_SMP
-/*
- * This function is needed by all SMP systems. It must _always_ be valid
- * from the initial startup. We map APIC_BASE very early in page_setup(),
- * so this is correct in the x86 case.
- */
-DECLARE_PER_CPU(int, cpu_number);
-#define raw_smp_processor_id() (x86_read_percpu(cpu_number))
-
-extern int safe_smp_processor_id(void);
-
-/* We don't mark CPUs online until __cpu_up(), so we need another measure */
-static inline int num_booting_cpus(void)
-{
-	return cpus_weight(cpu_callout_map);
-}
-
-#else /* CONFIG_SMP */
-#define safe_smp_processor_id()		0
-#endif /* !CONFIG_SMP */
-
 #endif /* !ASSEMBLY */
 #endif
diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h
index 8ea49529f3248be1767fada1df474979c44d56c6..058f41399798f648299a28859d1d662367ebac27 100644
--- a/include/asm-x86/smp_64.h
+++ b/include/asm-x86/smp_64.h
@@ -7,32 +7,5 @@ extern cpumask_t cpu_callin_map;
 
 extern int smp_call_function_mask(cpumask_t mask,
 				  void (*func)(void *), void *info, int wait);
 
-#ifdef CONFIG_SMP
-
-#define raw_smp_processor_id()	read_pda(cpunumber)
-
-#define stack_smp_processor_id()					\
-({									\
-	struct thread_info *ti;						\
-	asm("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK));		\
-	ti->cpu;							\
-})
-
-/*
- * On x86 all CPUs are mapped 1:1 to the APIC space. This simplifies
- * scheduling and IPI sending and compresses data structures.
- */
-static inline int num_booting_cpus(void)
-{
-	return cpus_weight(cpu_callout_map);
-}
-
-#define safe_smp_processor_id()	smp_processor_id()
-#else /* CONFIG_SMP */
-#define stack_smp_processor_id() 0
-#define safe_smp_processor_id() 0
-#endif /* !CONFIG_SMP */
-
-
 #endif
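
Note on the 64-bit stack_smp_processor_id() moved above: it rounds %rsp down with CURRENT_MASK so that the result points at the thread_info sitting at the base of the THREAD_SIZE-aligned kernel stack, and then reads ->cpu from it. The userspace sketch below only illustrates that masking trick under made-up names (FAKE_THREAD_SIZE, fake_thread_info); it is not kernel code and not part of this patch.

    /* Illustrative only: mimic "andq %rsp, CURRENT_MASK" -> thread_info->cpu */
    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    #define FAKE_THREAD_SIZE   8192UL                     /* assume an 8 KiB, 8 KiB-aligned stack */
    #define FAKE_CURRENT_MASK  (~(FAKE_THREAD_SIZE - 1))  /* analogue of CURRENT_MASK */

    struct fake_thread_info {
    	int cpu;               /* stands in for thread_info->cpu */
    };

    int main(void)
    {
    	/* an aligned buffer plays the role of the per-task kernel stack */
    	void *stack = aligned_alloc(FAKE_THREAD_SIZE, FAKE_THREAD_SIZE);
    	struct fake_thread_info *ti = stack;  /* thread_info lives at the stack base */

    	ti->cpu = 3;

    	/* pretend this is %rsp somewhere inside that stack */
    	uintptr_t sp = (uintptr_t)stack + FAKE_THREAD_SIZE - 128;

    	/* the same and-with-mask step the macro performs on %rsp */
    	struct fake_thread_info *found =
    		(struct fake_thread_info *)(sp & FAKE_CURRENT_MASK);

    	printf("cpu = %d\n", found->cpu);  /* prints "cpu = 3" */
    	free(stack);
    	return 0;
    }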