Commit 297d9c03, authored by Jeremy Fitzhardinge, committed by Linus Torvalds

i386: move common parts of smp into their own file

Several parts of kernel/smp.c and smpboot.c are generally useful for other
subarchitectures and paravirt_ops implementations, so make them available for
reuse.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Acked-by: Chris Wright <chrisw@sous-sol.org>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: 838c4118
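To illustrate the intended reuse, here is a minimal sketch of how a subarchitecture or paravirt_ops backend could call into the now-common helpers from its own CPU bringup path. The xen_cpu_up() and xen_boot_secondary() names are invented for the example; only init_gdt() comes from this commit.

	/* Hypothetical reuse sketch: a non-native SMP backend calling the
	 * shared helper instead of carrying its own copy of it. */
	static int xen_cpu_up(unsigned int cpu)
	{
		init_gdt(cpu);			/* shared per-cpu GDT setup */
		return xen_boot_secondary(cpu);	/* invented, backend-specific */
	}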
arch/i386/kernel/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_X86_CPUID)	+= cpuid.o
 obj-$(CONFIG_MICROCODE)		+= microcode.o
 obj-$(CONFIG_APM)		+= apm.o
 obj-$(CONFIG_X86_SMP)		+= smp.o smpboot.o tsc_sync.o
+obj-$(CONFIG_SMP)		+= smpcommon.o
 obj-$(CONFIG_X86_TRAMPOLINE)	+= trampoline.o
 obj-$(CONFIG_X86_MPPARSE)	+= mpparse.o
 obj-$(CONFIG_X86_LOCAL_APIC)	+= apic.o nmi.o
...
arch/i386/kernel/smp.c
@@ -467,7 +467,7 @@ void flush_tlb_all(void)
  * it goes straight through and wastes no time serializing
  * anything. Worst case is that we lose a reschedule ...
  */
-void native_smp_send_reschedule(int cpu)
+static void native_smp_send_reschedule(int cpu)
 {
 	WARN_ON(cpu_is_offline(cpu));
 	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
@@ -546,9 +546,10 @@ static void __smp_call_function(void (*func) (void *info), void *info,
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
  */
-int native_smp_call_function_mask(cpumask_t mask,
-				  void (*func)(void *), void *info,
-				  int wait)
+static int
+native_smp_call_function_mask(cpumask_t mask,
+			      void (*func)(void *), void *info,
+			      int wait)
 {
 	struct call_data_struct data;
 	cpumask_t allbutself;
@@ -599,60 +600,6 @@ int native_smp_call_function_mask(cpumask_t mask,
 	return 0;
 }
 
-/**
- * smp_call_function(): Run a function on all other CPUs.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Unused.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
-		      int wait)
-{
-	return smp_call_function_mask(cpu_online_map, func, info, wait);
-}
-EXPORT_SYMBOL(smp_call_function);
-
-/**
- * smp_call_function_single - Run a function on another CPU
- * @cpu: The target CPU.  Cannot be the calling CPU.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Unused.
- * @wait: If true, wait until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- */
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-			     int nonatomic, int wait)
-{
-	/* prevent preemption and reschedule on another processor */
-	int ret;
-	int me = get_cpu();
-	if (cpu == me) {
-		WARN_ON(1);
-		put_cpu();
-		return -EBUSY;
-	}
-
-	ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
-
-	put_cpu();
-	return ret;
-}
-EXPORT_SYMBOL(smp_call_function_single);
-
 static void stop_this_cpu (void * dummy)
 {
 	local_irq_disable();
@@ -670,7 +617,7 @@ static void stop_this_cpu (void * dummy)
  * this function calls the 'stop' function on all other CPUs in the system.
  */
-void native_smp_send_stop(void)
+static void native_smp_send_stop(void)
 {
 	/* Don't deadlock on the call lock in panic */
 	int nolock = !spin_trylock(&call_lock);
...
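The native_* functions can become static because nothing outside this file calls them by name any more; in the paravirt_ops design they are reached through an ops table instead. Roughly like the following (the field list is abbreviated and reconstructed from memory of this patch series, so treat it as a sketch rather than the exact struct):

	/* Sketch of the indirection that lets the native_* functions be
	 * static; abbreviated, not the full hook set. */
	struct smp_ops {
		void (*smp_send_reschedule)(int cpu);
		int (*smp_call_function_mask)(cpumask_t mask,
					      void (*func)(void *), void *info,
					      int wait);
		void (*smp_send_stop)(void);
	};

	struct smp_ops smp_ops = {
		.smp_send_reschedule	= native_smp_send_reschedule,
		.smp_call_function_mask	= native_smp_call_function_mask,
		.smp_send_stop		= native_smp_send_stop,
	};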
arch/i386/kernel/smpboot.c
@@ -98,9 +98,6 @@ EXPORT_SYMBOL(x86_cpu_to_apicid);
 u8 apicid_2_node[MAX_APICID];
 
-DEFINE_PER_CPU(unsigned long, this_cpu_off);
-EXPORT_PER_CPU_SYMBOL(this_cpu_off);
-
 /*
  * Trampoline 80x86 program as an array.
  */
@@ -763,25 +760,6 @@ static inline struct task_struct * alloc_idle_task(int cpu)
 #define alloc_idle_task(cpu) fork_idle(cpu)
 #endif
 
-/* Initialize the CPU's GDT.  This is either the boot CPU doing itself
-   (still using the master per-cpu area), or a CPU doing it for a
-   secondary which will soon come up. */
-static __cpuinit void init_gdt(int cpu)
-{
-	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
-
-	pack_descriptor((u32 *)&gdt[GDT_ENTRY_PERCPU].a,
-			(u32 *)&gdt[GDT_ENTRY_PERCPU].b,
-			__per_cpu_offset[cpu], 0xFFFFF,
-			0x80 | DESCTYPE_S | 0x2, 0x8);
-
-	per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
-	per_cpu(cpu_number, cpu) = cpu;
-}
-
-/* Defined in head.S */
-extern struct Xgt_desc_struct early_gdt_descr;
-
 static int __cpuinit do_boot_cpu(int apicid, int cpu)
 /*
  * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
...
arch/i386/kernel/smpcommon.c (new file)
+/*
+ * SMP stuff which is common to all sub-architectures.
+ */
+#include <linux/module.h>
+#include <asm/smp.h>
+
+DEFINE_PER_CPU(unsigned long, this_cpu_off);
+EXPORT_PER_CPU_SYMBOL(this_cpu_off);
+
+/* Initialize the CPU's GDT.  This is either the boot CPU doing itself
+   (still using the master per-cpu area), or a CPU doing it for a
+   secondary which will soon come up. */
+__cpuinit void init_gdt(int cpu)
+{
+	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+
+	pack_descriptor((u32 *)&gdt[GDT_ENTRY_PERCPU].a,
+			(u32 *)&gdt[GDT_ENTRY_PERCPU].b,
+			__per_cpu_offset[cpu], 0xFFFFF,
+			0x80 | DESCTYPE_S | 0x2, 0x8);
+
+	per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
+	per_cpu(cpu_number, cpu) = cpu;
+}
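What init_gdt() builds: GDT_ENTRY_PERCPU gets base __per_cpu_offset[cpu], a 4GB limit (0xFFFFF with the 4K-granularity flag 0x8), and a present, writable data type (0x80 | DESCTYPE_S | 0x2). Once %fs is loaded with that selector, each CPU reaches its own per-cpu area in a single segment-relative access. A simplified model of the accessor this enables (the kernel's real macros live in asm-i386/percpu.h; this is a sketch, not that code):

	/* Simplified model: read a 32-bit per-cpu variable through %fs.
	 * The variable's link-time address is the offset from the
	 * %fs base set up by init_gdt(). */
	#define percpu_read(var) ({					\
		typeof(var) __v;					\
		asm("movl %%fs:%1, %0" : "=r" (__v) : "m" (var));	\
		__v;							\
	})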
+
+/**
+ * smp_call_function(): Run a function on all other CPUs.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @nonatomic: Unused.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
+		      int wait)
+{
+	return smp_call_function_mask(cpu_online_map, func, info, wait);
+}
+EXPORT_SYMBOL(smp_call_function);
+
+/**
+ * smp_call_function_single - Run a function on another CPU
+ * @cpu: The target CPU.  Cannot be the calling CPU.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @nonatomic: Unused.
+ * @wait: If true, wait until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
+ */
+int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+			     int nonatomic, int wait)
+{
+	/* prevent preemption and reschedule on another processor */
+	int ret;
+	int me = get_cpu();
+	if (cpu == me) {
+		WARN_ON(1);
+		put_cpu();
+		return -EBUSY;
+	}
+
+	ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
+
+	put_cpu();
+	return ret;
+}
+EXPORT_SYMBOL(smp_call_function_single);
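A hypothetical caller, to show the contract: the handler must be fast and non-blocking, and the call may not be made with interrupts disabled or from interrupt context. collect_mmu_state() and struct mmu_snapshot are invented for this example:

	struct mmu_snapshot { unsigned long cr3; };

	static void collect_mmu_state(void *info)
	{
		struct mmu_snapshot *s = info;	/* runs on the target CPU */
		s->cr3 = read_cr3();
	}

	int snapshot_cpu_mmu(int cpu, struct mmu_snapshot *s)
	{
		/* nonatomic is unused; wait=1 blocks until the handler
		 * has finished on the remote CPU */
		return smp_call_function_single(cpu, collect_mmu_state, s, 0, 1);
	}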
include/asm-i386/processor.h
@@ -749,9 +749,13 @@ extern unsigned long boot_option_idle_override;
 extern void enable_sep_cpu(void);
 extern int sysenter_setup(void);
 
+/* Defined in head.S */
+extern struct Xgt_desc_struct early_gdt_descr;
+
 extern void cpu_set_gdt(int);
 extern void switch_to_new_gdt(void);
 extern void cpu_init(void);
+extern void init_gdt(int cpu);
 
 extern int force_mwait;
...