#ifndef _ASM_X86_SMP_H
#define _ASM_X86_SMP_H
#ifndef __ASSEMBLY__
#include <linux/cpumask.h>
#include <linux/init.h>
#include <asm/percpu.h>

/*
 * We need the APIC definitions automatically as part of 'smp.h'
 */
#ifdef CONFIG_X86_LOCAL_APIC
# include <asm/mpspec.h>
# include <asm/apic.h>
# ifdef CONFIG_X86_IO_APIC
#  include <asm/io_apic.h>
# endif
#endif
#include <asm/pda.h>
#include <asm/thread_info.h>
#include <asm/cpumask.h>

extern int smp_num_siblings;
extern unsigned int num_processors;

DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
DECLARE_PER_CPU(cpumask_t, cpu_core_map);
DECLARE_PER_CPU(u16, cpu_llc_id);
#ifdef CONFIG_X86_32
DECLARE_PER_CPU(int, cpu_number);
#endif
/* Return the per-CPU HT-sibling cpumask for @cpu. */
static inline struct cpumask *cpu_sibling_mask(int cpu)
{
	return &per_cpu(cpu_sibling_map, cpu);
}

/* Return the per-CPU core-sibling cpumask for @cpu. */
static inline struct cpumask *cpu_core_mask(int cpu)
{
	return &per_cpu(cpu_core_map, cpu);
}

DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);

/* Static state in head.S used to set up a CPU */
extern struct {
	void *sp;		/* initial stack pointer for the CPU being brought up */
	unsigned short ss;	/* initial stack segment */
} stack_start;

/*
 * Hook table through which the SMP boot/teardown and cross-CPU IPI
 * primitives are dispatched; paravirt guests install their own
 * implementations here.  Member order must not change (set up by
 * native and paravirt initialization code).
 */
struct smp_ops {
	void (*smp_prepare_boot_cpu)(void);
	void (*smp_prepare_cpus)(unsigned max_cpus);
	void (*smp_cpus_done)(unsigned max_cpus);

	void (*smp_send_stop)(void);
	void (*smp_send_reschedule)(int cpu);

	int (*cpu_up)(unsigned cpu);
	int (*cpu_disable)(void);
	void (*cpu_die)(unsigned int cpu);
	void (*play_dead)(void);

	void (*send_call_func_ipi)(const struct cpumask *mask);
	void (*send_call_func_single_ipi)(int cpu);
};

/* Globals due to paravirt */
extern void set_cpu_sibling_map(int cpu);

#ifdef CONFIG_SMP
#ifndef CONFIG_PARAVIRT
#define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0)
#endif
extern struct smp_ops smp_ops;

/* Delegate to the installed smp_ops smp_send_stop hook. */
static inline void smp_send_stop(void)
{
	smp_ops.smp_send_stop();
}

/* Delegate to the installed smp_ops smp_prepare_boot_cpu hook. */
static inline void smp_prepare_boot_cpu(void)
{
	smp_ops.smp_prepare_boot_cpu();
}

/* Delegate to the installed smp_ops smp_prepare_cpus hook. */
static inline void smp_prepare_cpus(unsigned int max_cpus)
{
	smp_ops.smp_prepare_cpus(max_cpus);
}

/* Delegate to the installed smp_ops smp_cpus_done hook. */
static inline void smp_cpus_done(unsigned int max_cpus)
{
	smp_ops.smp_cpus_done(max_cpus);
}

/* Bring @cpu online via the installed smp_ops cpu_up hook; returns its status. */
static inline int __cpu_up(unsigned int cpu)
{
	return smp_ops.cpu_up(cpu);
}

/* Delegate to the installed smp_ops cpu_disable hook; returns its status. */
static inline int __cpu_disable(void)
{
	return smp_ops.cpu_disable();
}

/* Delegate to the installed smp_ops cpu_die hook for @cpu. */
static inline void __cpu_die(unsigned int cpu)
{
	smp_ops.cpu_die(cpu);
}

/* Delegate to the installed smp_ops play_dead hook (offlined-CPU idle path). */
static inline void play_dead(void)
{
	smp_ops.play_dead();
}

/* Delegate to the installed smp_ops smp_send_reschedule hook for @cpu. */
static inline void smp_send_reschedule(int cpu)
{
	smp_ops.smp_send_reschedule(cpu);
}
/* Send a call-function IPI to a single @cpu via the smp_ops hook. */
static inline void arch_send_call_function_single_ipi(int cpu)
{
	smp_ops.send_call_func_single_ipi(cpu);
}

/*
 * Send a call-function IPI to every CPU in @mask.  The mask is taken
 * by value here and handed to the smp_ops hook by address.
 */
static inline void arch_send_call_function_ipi(cpumask_t mask)
{
	smp_ops.send_call_func_ipi(&mask);
}

void cpu_disable_common(void);
void native_smp_prepare_boot_cpu(void);
void native_smp_prepare_cpus(unsigned int max_cpus);
void native_smp_cpus_done(unsigned int max_cpus);
int native_cpu_up(unsigned int cpunum);
int native_cpu_disable(void);
void native_cpu_die(unsigned int cpu);
void native_play_dead(void);
void play_dead_common(void);

void native_send_call_func_ipi(const struct cpumask *mask);
void native_send_call_func_single_ipi(int cpu);

void smp_store_cpu_info(int id);
#define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)

/* We don't mark CPUs online until __cpu_up(), so we need another measure */
static inline int num_booting_cpus(void)
{
151
	return cpumask_weight(cpu_callout_mask);
152
}
#endif /* CONFIG_SMP */

extern unsigned disabled_cpus __cpuinitdata;

#ifdef CONFIG_X86_32_SMP
/*
 * This function is needed by all SMP systems. It must _always_ be valid
 * from the initial startup. We map APIC_BASE very early in page_setup(),
 * so this is correct in the x86 case.
 */
#define raw_smp_processor_id() (percpu_read(cpu_number))
extern int safe_smp_processor_id(void);

#elif defined(CONFIG_X86_64_SMP)
#define raw_smp_processor_id()	read_pda(cpunumber)

/*
 * Derive the CPU number from the stack: mask %rsp with CURRENT_MASK to
 * locate the thread_info at the base of the current stack, then read
 * its ->cpu field.
 */
#define stack_smp_processor_id()					\
({								\
	struct thread_info *ti;						\
	__asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK));	\
	ti->cpu;							\
})
/* On 64-bit, smp_processor_id() is already safe in all contexts here. */
#define safe_smp_processor_id()		smp_processor_id()

#endif

#ifdef CONFIG_X86_LOCAL_APIC

#ifndef CONFIG_X86_64
/* Read this CPU's logical APIC ID from the memory-mapped LDR register. */
static inline int logical_smp_processor_id(void)
{
	/* we don't want to mark this access volatile - bad code generation */
	return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
}

#include <mach_apicdef.h>
/*
 * Read the local APIC's ID register (memory-mapped) and extract the
 * APIC ID field with GET_APIC_ID().
 */
static inline unsigned int read_apic_id(void)
{
	unsigned int reg;

	/* non-volatile read is intentional - see logical_smp_processor_id() */
	reg = *(u32 *)(APIC_BASE + APIC_ID);

	return GET_APIC_ID(reg);
}
#endif

# if defined(APIC_DEFINITION) || defined(CONFIG_X86_64)
extern int hard_smp_processor_id(void);
# else
#include <mach_apicdef.h>
/* Physical APIC ID of the current CPU, read straight from the APIC. */
static inline int hard_smp_processor_id(void)
{
	/* we don't want to mark this access volatile - bad code generation */
	return read_apic_id();
}
# endif /* APIC_DEFINITION */

#else /* CONFIG_X86_LOCAL_APIC */

# ifndef CONFIG_SMP
#  define hard_smp_processor_id()	0
# endif

#endif /* CONFIG_X86_LOCAL_APIC */

#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_SMP_H */