Commit 8678969e authored by Glauber Costa, committed by Ingo Molnar

x86: merge smp_send_reschedule

The function definition is moved to the common header; the x86_64 version is now called
native_smp_send_reschedule.
Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent: c76cb368
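The diff below routes smp_send_reschedule through the smp_ops function-pointer table: the x86_64 implementation becomes a static native_* backend registered in smp_ops, and a static inline wrapper in the common header dispatches through the table. What follows is a minimal, self-contained user-space sketch of that indirection pattern under stated assumptions; it is not kernel code, and the printf body (standing in for send_IPI_mask) plus the main() driver are illustrative only.

/*
 * Sketch of the ops-table indirection this commit applies: callers use a
 * static inline wrapper that dispatches through a struct of function
 * pointers, so the "native" backend can be replaced without touching
 * callers. User-space illustration; printf stands in for the real IPI send.
 */
#include <stdio.h>

struct smp_ops {
	void (*smp_send_reschedule)(int cpu);
};

/* x86_64 backend, renamed native_* as in the commit */
static void native_smp_send_reschedule(int cpu)
{
	printf("native: sending RESCHEDULE IPI to CPU %d\n", cpu);
}

/* Default ops table points at the native implementation. */
struct smp_ops smp_ops = {
	.smp_send_reschedule = native_smp_send_reschedule,
};

/* Common-header wrapper: what callers keep using. */
static inline void smp_send_reschedule(int cpu)
{
	smp_ops.smp_send_reschedule(cpu);
}

int main(void)
{
	smp_send_reschedule(1);	/* dispatches through smp_ops */
	return 0;
}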
@@ -290,8 +290,9 @@ void flush_tlb_all(void)
  * anything. Worst case is that we lose a reschedule ...
  */
-void smp_send_reschedule(int cpu)
+static void native_smp_send_reschedule(int cpu)
 {
+	WARN_ON(cpu_is_offline(cpu));
 	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
 }
@@ -528,5 +529,7 @@ asmlinkage void smp_call_function_interrupt(void)
 	}
 }
 
-struct smp_ops smp_ops;
+struct smp_ops smp_ops = {
+	.smp_send_reschedule = native_smp_send_reschedule,
+};
 EXPORT_SYMBOL_GPL(smp_ops);
@@ -23,6 +23,11 @@ struct smp_ops {
 #ifdef CONFIG_SMP
 extern struct smp_ops smp_ops;
 
+static inline void smp_send_reschedule(int cpu)
+{
+	smp_ops.smp_send_reschedule(cpu);
+}
+
 #endif
 
 #ifdef CONFIG_X86_32
@@ -60,10 +60,6 @@ static inline void smp_send_stop(void)
 {
 	smp_ops.smp_send_stop();
 }
-static inline void smp_send_reschedule(int cpu)
-{
-	smp_ops.smp_send_reschedule(cpu);
-}
 static inline int smp_call_function_mask(cpumask_t mask,
 					  void (*func) (void *info), void *info,
 					  int wait)
@@ -65,8 +65,6 @@ static inline int num_booting_cpus(void)
 	return cpus_weight(cpu_callout_map);
 }
 
-extern void smp_send_reschedule(int cpu);
-
 #else /* CONFIG_SMP */
 
 extern unsigned int boot_cpu_id;