提交 78eef01b 编写于 作者: A Andrew Morton 提交者: Linus Torvalds

[PATCH] on_each_cpu(): disable local interrupts

When on_each_cpu() runs the callback on other CPUs, it runs with local
interrupts disabled.  So we should run the function with local interrupts
disabled on this CPU, too.

And do the same for UP, so the callback is run in the same environment on both
UP and SMP.  (strictly it should do preempt_disable() too, but I think
local_irq_disable is sufficiently equivalent).

Also uninlines on_each_cpu().  softirq.c was the most appropriate file I could
find, but it doesn't seem to justify creating a new file.

Oh, and fix up that comment over (under?) x86's smp_call_function().  It
drives me nuts.
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
上级 ac2b898c
...@@ -504,27 +504,23 @@ void unlock_ipi_call_lock(void) ...@@ -504,27 +504,23 @@ void unlock_ipi_call_lock(void)
spin_unlock_irq(&call_lock); spin_unlock_irq(&call_lock);
} }
static struct call_data_struct * call_data; static struct call_data_struct *call_data;
/* /**
* this function sends a 'generic call function' IPI to all other CPUs * smp_call_function(): Run a function on all other CPUs.
* in the system. * @func: The function to run. This must be fast and non-blocking.
*/ * @info: An arbitrary pointer to pass to the function.
* @nonatomic: currently unused.
int smp_call_function (void (*func) (void *info), void *info, int nonatomic, * @wait: If true, wait (atomically) until function has completed on other CPUs.
int wait) *
/* * Returns 0 on success, else a negative status code. Does not return until
* [SUMMARY] Run a function on all other CPUs.
* <func> The function to run. This must be fast and non-blocking.
* <info> An arbitrary pointer to pass to the function.
* <nonatomic> currently unused.
* <wait> If true, wait (atomically) until function has completed on other CPUs.
* [RETURNS] 0 on success, else a negative status code. Does not return until
* remote CPUs are nearly ready to execute <<func>> or are or have executed. * remote CPUs are nearly ready to execute <<func>> or are or have executed.
* *
* You must not call this function with disabled interrupts or from a * You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler. * hardware interrupt handler or from a bottom half handler.
*/ */
int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
int wait)
{ {
struct call_data_struct data; struct call_data_struct data;
int cpus; int cpus;
......
...@@ -52,23 +52,12 @@ extern void smp_cpus_done(unsigned int max_cpus); ...@@ -52,23 +52,12 @@ extern void smp_cpus_done(unsigned int max_cpus);
/* /*
* Call a function on all other processors * Call a function on all other processors
*/ */
extern int smp_call_function (void (*func) (void *info), void *info, int smp_call_function(void(*func)(void *info), void *info, int retry, int wait);
int retry, int wait);
/* /*
* Call a function on all processors * Call a function on all processors
*/ */
static inline int on_each_cpu(void (*func) (void *info), void *info, int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait);
int retry, int wait)
{
int ret = 0;
preempt_disable();
ret = smp_call_function(func, info, retry, wait);
func(info);
preempt_enable();
return ret;
}
#define MSG_ALL_BUT_SELF 0x8000 /* Assume <32768 CPU's */ #define MSG_ALL_BUT_SELF 0x8000 /* Assume <32768 CPU's */
#define MSG_ALL 0x8001 #define MSG_ALL 0x8001
...@@ -94,7 +83,13 @@ void smp_prepare_boot_cpu(void); ...@@ -94,7 +83,13 @@ void smp_prepare_boot_cpu(void);
#define raw_smp_processor_id() 0 #define raw_smp_processor_id() 0
#define hard_smp_processor_id() 0 #define hard_smp_processor_id() 0
#define smp_call_function(func,info,retry,wait) ({ 0; }) #define smp_call_function(func,info,retry,wait) ({ 0; })
#define on_each_cpu(func,info,retry,wait) ({ func(info); 0; }) #define on_each_cpu(func,info,retry,wait) \
({ \
local_irq_disable(); \
func(info); \
local_irq_enable(); \
0; \
})
static inline void smp_send_reschedule(int cpu) { } static inline void smp_send_reschedule(int cpu) { }
#define num_booting_cpus() 1 #define num_booting_cpus() 1
#define smp_prepare_boot_cpu() do {} while (0) #define smp_prepare_boot_cpu() do {} while (0)
......
...@@ -16,6 +16,7 @@ ...@@ -16,6 +16,7 @@
#include <linux/cpu.h> #include <linux/cpu.h>
#include <linux/kthread.h> #include <linux/kthread.h>
#include <linux/rcupdate.h> #include <linux/rcupdate.h>
#include <linux/smp.h>
#include <asm/irq.h> #include <asm/irq.h>
/* /*
...@@ -495,3 +496,22 @@ __init int spawn_ksoftirqd(void) ...@@ -495,3 +496,22 @@ __init int spawn_ksoftirqd(void)
register_cpu_notifier(&cpu_nfb); register_cpu_notifier(&cpu_nfb);
return 0; return 0;
} }
#ifdef CONFIG_SMP
/*
 * Call a function on all processors: the other CPUs via an IPI
 * (smp_call_function), then on this CPU directly.
 *
 * The IPI handler runs @func with local interrupts disabled on the
 * remote CPUs, so interrupts are disabled around the local invocation
 * too — @func always sees the same execution environment regardless of
 * which CPU it runs on.
 *
 * Returns the result of smp_call_function() (0 on success, else a
 * negative status code).
 *
 * NOTE(review): must not be called with interrupts disabled or from
 * interrupt context — smp_call_function() presumably has that
 * restriction; confirm against the arch implementation.
 */
int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait)
{
int ret = 0;
/* Keep this CPU in the cpu_online_map snapshot seen by the IPI path. */
preempt_disable();
ret = smp_call_function(func, info, retry, wait);
/* Match the irqs-off environment the remote CPUs run func() in. */
local_irq_disable();
func(info);
local_irq_enable();
preempt_enable();
return ret;
}
EXPORT_SYMBOL(on_each_cpu);
#endif
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册