Commit 6fe940d6 authored by Li Shaohua, committed by Linus Torvalds

[PATCH] sep initializing rework

Make SEP init per-cpu, so it is hotplug safe.

Signed-off-by: Li Shaohua <shaohua.li@intel.com>
Signed-off-by: Ashok Raj <ashok.raj@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Parent 67664c8f
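Before this patch, sysenter_setup() was an __initcall that programmed the SYSENTER MSRs on all CPUs via on_each_cpu(), so a CPU hotplugged after boot never had its MSRs written. The patch splits the work: the vsyscall page is still built once, but each CPU now programs its own MSRs from identify_cpu() as it comes online. A condensed sketch of the new flow, assembled from the hunks below (simplified, not verbatim kernel code):

    /* Runs on every CPU during bringup, boot and hotplug alike. */
    void __init identify_cpu(struct cpuinfo_x86 *c)
    {
            /* ... feature detection, mcheck_init(), etc. ... */

            if (c == &boot_cpu_data)
                    sysenter_setup();       /* one-time vsyscall page setup */
            enable_sep_cpu();               /* per-CPU: program this CPU's MSRs */
    }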
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -432,6 +432,9 @@ void __init identify_cpu(struct cpuinfo_x86 *c)
 #ifdef CONFIG_X86_MCE
 	mcheck_init(c);
 #endif
+	if (c == &boot_cpu_data)
+		sysenter_setup();
+	enable_sep_cpu();
 }
 
 #ifdef CONFIG_X86_HT
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -495,6 +495,16 @@ struct call_data_struct {
 	int wait;
 };
 
+void lock_ipi_call_lock(void)
+{
+	spin_lock_irq(&call_lock);
+}
+
+void unlock_ipi_call_lock(void)
+{
+	spin_unlock_irq(&call_lock);
+}
+
 static struct call_data_struct * call_data;
 
 /*
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -449,7 +449,18 @@ static void __init start_secondary(void *unused)
 	 * the local TLBs too.
 	 */
 	local_flush_tlb();
+
+	/*
+	 * We need to hold call_lock, so there is no inconsistency
+	 * between the time smp_call_function() determines number of
+	 * IPI recipients, and the time when the determination is made
+	 * for which cpus receive the IPI. Holding this
+	 * lock helps us to not include this cpu in a currently in progress
+	 * smp_call_function().
+	 */
+	lock_ipi_call_lock();
 	cpu_set(smp_processor_id(), cpu_online_map);
+	unlock_ipi_call_lock();
 
 	/* We can take interrupts now: we're officially "up". */
 	local_irq_enable();
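The lock_ipi_call_lock()/unlock_ipi_call_lock() wrappers exist because call_lock is static to smp.c, while the cpu_online_map update happens in smpboot.c. The race the comment above describes, in condensed form (a sketch of the smp_call_function() of that era, simplified and not verbatim):

    /* smp.c: the sender counts online CPUs under call_lock ... */
    spin_lock(&call_lock);
    cpus = num_online_cpus() - 1;           /* expected number of IPI acks */
    call_data = &data;
    send_IPI_allbutself(CALL_FUNCTION_VECTOR);
    while (atomic_read(&data.started) != cpus)
            cpu_relax();                    /* ... and waits for exactly that many */
    spin_unlock(&call_lock);

    /* smpboot.c: a booting CPU therefore must not become visible in
     * cpu_online_map in the middle of that window, so it takes the
     * same lock around the map update (see the hunk above). */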
--- a/arch/i386/kernel/sysenter.c
+++ b/arch/i386/kernel/sysenter.c
@@ -21,11 +21,16 @@
 
 extern asmlinkage void sysenter_entry(void);
 
-void enable_sep_cpu(void *info)
+void enable_sep_cpu(void)
 {
 	int cpu = get_cpu();
 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
 
+	if (!boot_cpu_has(X86_FEATURE_SEP)) {
+		put_cpu();
+		return;
+	}
+
 	tss->ss1 = __KERNEL_CS;
 	tss->esp1 = sizeof(struct tss_struct) + (unsigned long) tss;
 	wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
@@ -41,7 +46,7 @@ void enable_sep_cpu(void *info)
 extern const char vsyscall_int80_start, vsyscall_int80_end;
 extern const char vsyscall_sysenter_start, vsyscall_sysenter_end;
 
-static int __init sysenter_setup(void)
+int __init sysenter_setup(void)
 {
 	void *page = (void *)get_zeroed_page(GFP_ATOMIC);
 
@@ -58,8 +63,5 @@ static int __init sysenter_setup(void)
 		&vsyscall_sysenter_start,
 		&vsyscall_sysenter_end - &vsyscall_sysenter_start);
 
-	on_each_cpu(enable_sep_cpu, NULL, 1, 1);
 	return 0;
 }
-
-__initcall(sysenter_setup);
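The first hunk above is truncated after the initial wrmsr. For reference, the complete per-CPU MSR programming looks roughly like this; the SYSENTER_ESP/SYSENTER_EIP writes are filled in from the i386 sysenter.c of that era and are not part of the diff shown, so treat them as illustrative:

    void enable_sep_cpu(void)
    {
            int cpu = get_cpu();
            struct tss_struct *tss = &per_cpu(init_tss, cpu);

            if (!boot_cpu_has(X86_FEATURE_SEP)) {
                    put_cpu();
                    return;         /* this CPU has no SYSENTER/SYSEXIT */
            }

            /* SYSENTER loads its kernel stack pointer from the per-CPU TSS */
            tss->ss1 = __KERNEL_CS;
            tss->esp1 = sizeof(struct tss_struct) + (unsigned long) tss;
            wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
            wrmsr(MSR_IA32_SYSENTER_ESP, tss->esp1, 0);
            wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) sysenter_entry, 0);
            put_cpu();
    }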
--- a/arch/i386/power/cpu.c
+++ b/arch/i386/power/cpu.c
@@ -22,9 +22,11 @@
 #include <linux/device.h>
 #include <linux/suspend.h>
 #include <linux/acpi.h>
+
 #include <asm/uaccess.h>
 #include <asm/acpi.h>
 #include <asm/tlbflush.h>
+#include <asm/processor.h>
 
 static struct saved_context saved_context;
 
@@ -33,8 +35,6 @@ unsigned long saved_context_esp, saved_context_ebp;
 unsigned long saved_context_esi, saved_context_edi;
 unsigned long saved_context_eflags;
 
-extern void enable_sep_cpu(void *);
-
 void __save_processor_state(struct saved_context *ctxt)
 {
 	kernel_fpu_begin();
@@ -136,7 +136,7 @@ void __restore_processor_state(struct saved_context *ctxt)
 	 * sysenter MSRs
 	 */
 	if (boot_cpu_has(X86_FEATURE_SEP))
-		enable_sep_cpu(NULL);
+		enable_sep_cpu();
 
 	fix_processor_context();
 	do_fpu_end();
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -691,5 +691,7 @@ extern void select_idle_routine(const struct cpuinfo_x86 *c);
 #define cache_line_size() (boot_cpu_data.x86_cache_alignment)
 
 extern unsigned long boot_option_idle_override;
+extern void enable_sep_cpu(void);
+extern int sysenter_setup(void);
 
 #endif /* __ASM_I386_PROCESSOR_H */
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -42,6 +42,8 @@ extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
 extern void smp_invalidate_rcv(void);		/* Process an NMI */
 extern void (*mtrr_hook) (void);
 extern void zap_low_mappings (void);
+extern void lock_ipi_call_lock(void);
+extern void unlock_ipi_call_lock(void);
 
 #define MAX_APICID 256
 extern u8 x86_cpu_to_apicid[];