diff --git a/arch/x86/kernel/smp_32.c b/arch/x86/kernel/smp_32.c
index dc0cde9d16fb38088587b664f02b91f878176628..e4a6b669a0b824fbceb82169aba5535f02eab57a 100644
--- a/arch/x86/kernel/smp_32.c
+++ b/arch/x86/kernel/smp_32.c
@@ -583,7 +583,7 @@ native_smp_call_function_mask(cpumask_t mask,
 		atomic_set(&data.finished, 0);
 
 	call_data = &data;
-	mb();
+	wmb();
 
 	/* Send a message to other CPUs */
 	if (cpus_equal(mask, allbutself))
diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c
index aa2edb7f3a5147de61bf9ca0dad6719d90609908..e4494e829dfadb39f87c8f7f728c76b93358bcf5 100644
--- a/arch/x86/kernel/smp_64.c
+++ b/arch/x86/kernel/smp_64.c
@@ -354,26 +354,30 @@ static void __smp_call_function(void (*func) (void *info), void *info,
 }
 
 
-/*
- * this function sends a 'generic call function' IPI to all other CPU
- * of the system defined in the mask.
- */
-static int __smp_call_function_mask(cpumask_t mask,
-				    void (*func)(void *), void *info,
-				    int wait)
+int native_smp_call_function_mask(cpumask_t mask,
+				  void (*func)(void *), void *info,
+				  int wait)
 {
 	struct call_data_struct data;
 	cpumask_t allbutself;
 	int cpus;
 
+	/* Can deadlock when called with interrupts disabled */
+	WARN_ON(irqs_disabled());
+
+	/* Holding any lock stops cpus from going down. */
+	spin_lock(&call_lock);
+
 	allbutself = cpu_online_map;
 	cpu_clear(smp_processor_id(), allbutself);
 
 	cpus_and(mask, mask, allbutself);
 	cpus = cpus_weight(mask);
 
-	if (!cpus)
+	if (!cpus) {
+		spin_unlock(&call_lock);
 		return 0;
+	}
 
 	data.func = func;
 	data.info = info;
@@ -395,43 +399,14 @@ static int __smp_call_function_mask(cpumask_t mask,
 	while (atomic_read(&data.started) != cpus)
 		cpu_relax();
 
-	if (!wait)
-		return 0;
+	if (wait)
+		while (atomic_read(&data.finished) != cpus)
+			cpu_relax();
 
-	while (atomic_read(&data.finished) != cpus)
-		cpu_relax();
+	spin_unlock(&call_lock);
 
 	return 0;
 }
-/**
- * smp_call_function_mask(): Run a function on a set of other CPUs.
- * @mask: The set of cpus to run on. Must not include the current cpu.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int native_smp_call_function_mask(cpumask_t mask,
-				  void (*func)(void *), void *info,
-				  int wait)
-{
-	int ret;
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	spin_lock(&call_lock);
-	ret = __smp_call_function_mask(mask, func, info, wait);
-	spin_unlock(&call_lock);
-	return ret;
-}
 
 static void stop_this_cpu(void *dummy)
 {
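
For reference, here is a minimal standalone sketch of the handshake both hunks revolve around. It is not kernel code and not part of the patch: it uses C11 atomics and pthreads, and every name in it (call_data, do_work, target_cpu, NTARGETS) is invented for the example. The sender fills a call_data-style structure, publishes it (a release store standing in, roughly, for the wmb() before the IPI), and then spins on the started counter and, when wait is set, on the finished counter, mirroring the consolidated native_smp_call_function_mask().

/*
 * Illustrative userspace sketch only (not kernel code, not part of the
 * patch above).  Models the call_data publish/wait handshake.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NTARGETS 2	/* stand-ins for the other CPUs in the mask */

struct call_data {
	void (*func)(void *info);
	void *info;
	atomic_int started;
	atomic_int finished;
	int wait;
};

static _Atomic(struct call_data *) call_data_ptr;

static void do_work(void *info)
{
	printf("target ran func with info=%s\n", (const char *)info);
}

static void *target_cpu(void *arg)
{
	struct call_data *d;

	(void)arg;
	/* Wait for the "IPI": the acquire load pairs with the release store. */
	while (!(d = atomic_load_explicit(&call_data_ptr,
					  memory_order_acquire)))
		;				/* cpu_relax() would go here */

	atomic_fetch_add(&d->started, 1);	/* "I have seen the call" */
	d->func(d->info);
	if (d->wait)
		atomic_fetch_add(&d->finished, 1);	/* "func has returned" */
	return NULL;
}

int main(void)
{
	struct call_data data = {
		.func = do_work, .info = "hello",
		.started = 0, .finished = 0, .wait = 1,
	};
	pthread_t t[NTARGETS];
	int i;

	for (i = 0; i < NTARGETS; i++)
		pthread_create(&t[i], NULL, target_cpu, NULL);

	/* Fill the data first, then publish: this ordering is what the
	 * barrier before the IPI enforces in the kernel code. */
	atomic_store_explicit(&call_data_ptr, &data, memory_order_release);

	/* Same two spin loops as the patched function. */
	while (atomic_load(&data.started) != NTARGETS)
		;
	while (atomic_load(&data.finished) != NTARGETS)
		;

	for (i = 0; i < NTARGETS; i++)
		pthread_join(t[i], NULL);
	return 0;
}

Build with cc -pthread. The sketch deliberately leaves out the call_lock serialization the patch adds; it only shows the ordering and the started/finished counting.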