Commit c524a1d8 authored by Jens Axboe

alpha: convert to generic helpers for IPI function calls

This converts alpha to use the new helpers for smp_call_function() and
friends, and adds support for smp_call_function_single().
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Parent: f27b433e
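For illustration, here is a minimal sketch of what the conversion means for callers. It assumes the 2008-era generic IPI API, in which smp_call_function_single() still took retry and wait arguments (visible in the core_marvel.c hunk below); do_something() and example_caller() are hypothetical names, not part of this commit.

	#include <linux/kernel.h>
	#include <linux/smp.h>

	/* Hypothetical cross-call: runs on the target CPU in interrupt
	   context, so it must be fast and must not sleep. */
	static void do_something(void *info)
	{
		printk(KERN_INFO "ran on CPU %d\n", smp_processor_id());
	}

	static void example_caller(void)
	{
		/* Before this commit, alpha needed its private, mask-based
		   helper:
		     smp_call_function_on_cpu(do_something, NULL, 1, 1,
					      cpumask_of_cpu(0));
		   After it, the generic helper addresses one CPU directly
		   (args: cpu, func, info, retry, wait): */
		smp_call_function_single(0, do_something, NULL, 1, 1);
	}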
@@ -528,6 +528,7 @@ config ARCH_MAY_HAVE_PC_FDC
 config SMP
 	bool "Symmetric multi-processing support"
 	depends on ALPHA_SABLE || ALPHA_LYNX || ALPHA_RAWHIDE || ALPHA_DP264 || ALPHA_WILDFIRE || ALPHA_TITAN || ALPHA_GENERIC || ALPHA_SHARK || ALPHA_MARVEL
+	select USE_GENERIC_SMP_HELPERS
 	---help---
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
...
@@ -660,9 +660,9 @@ __marvel_rtc_io(u8 b, unsigned long addr, int write)
 #ifdef CONFIG_SMP
 	if (smp_processor_id() != boot_cpuid)
-		smp_call_function_on_cpu(__marvel_access_rtc,
-					 &rtc_access, 1, 1,
-					 cpumask_of_cpu(boot_cpuid));
+		smp_call_function_single(boot_cpuid,
+					 __marvel_access_rtc,
+					 &rtc_access, 1, 1);
 	else
 		__marvel_access_rtc(&rtc_access);
 #else
...
@@ -62,6 +62,7 @@ static struct {
 enum ipi_message_type {
 	IPI_RESCHEDULE,
 	IPI_CALL_FUNC,
+	IPI_CALL_FUNC_SINGLE,
 	IPI_CPU_STOP,
 };
@@ -558,51 +559,6 @@ send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation)
 		wripir(i);
 }
 
-/* Structure and data for smp_call_function.  This is designed to
-   minimize static memory requirements.  Plus it looks cleaner.  */
-
-struct smp_call_struct {
-	void (*func) (void *info);
-	void *info;
-	long wait;
-	atomic_t unstarted_count;
-	atomic_t unfinished_count;
-};
-
-static struct smp_call_struct *smp_call_function_data;
-
-/* Atomicly drop data into a shared pointer.  The pointer is free if
-   it is initially locked.  If retry, spin until free.  */
-
-static int
-pointer_lock (void *lock, void *data, int retry)
-{
-	void *old, *tmp;
-	mb();
- again:
-	/* Compare and swap with zero.  */
-	asm volatile (
-	"1:	ldq_l	%0,%1\n"
-	"	mov	%3,%2\n"
-	"	bne	%0,2f\n"
-	"	stq_c	%2,%1\n"
-	"	beq	%2,1b\n"
-	"2:"
-	: "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp)
-	: "r"(data)
-	: "memory");
-
-	if (old == 0)
-		return 0;
-	if (! retry)
-		return -EBUSY;
-
-	while (*(void **)lock)
-		barrier();
-	goto again;
-}
-
 void
 handle_ipi(struct pt_regs *regs)
 {
@@ -632,31 +588,12 @@ handle_ipi(struct pt_regs *regs)
 			break;
 
 		case IPI_CALL_FUNC:
-		    {
-			struct smp_call_struct *data;
-			void (*func)(void *info);
-			void *info;
-			int wait;
-
-			data = smp_call_function_data;
-			func = data->func;
-			info = data->info;
-			wait = data->wait;
-
-			/* Notify the sending CPU that the data has been
-			   received, and execution is about to begin.  */
-			mb();
-			atomic_dec (&data->unstarted_count);
-
-			/* At this point the structure may be gone unless
-			   wait is true.  */
-			(*func)(info);
-
-			/* Notify the sending CPU that the task is done.  */
-			mb();
-			if (wait) atomic_dec (&data->unfinished_count);
+			generic_smp_call_function_interrupt();
+			break;
+
+		case IPI_CALL_FUNC_SINGLE:
+			generic_smp_call_function_single_interrupt();
 			break;
-		    }
 
 		case IPI_CPU_STOP:
 			halt();
@@ -700,102 +637,15 @@ smp_send_stop(void)
 	send_ipi_message(to_whom, IPI_CPU_STOP);
 }
 
-/*
- * Run a function on all other CPUs.
- *  <func>	The function to run. This must be fast and non-blocking.
- *  <info>	An arbitrary pointer to pass to the function.
- *  <retry>	If true, keep retrying until ready.
- *  <wait>	If true, wait until function has completed on other CPUs.
- *  [RETURNS]	0 on success, else a negative status code.
- *
- * Does not return until remote CPUs are nearly ready to execute <func>
- * or are or have executed.
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int
-smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry,
-			  int wait, cpumask_t to_whom)
+void arch_send_call_function_ipi(cpumask_t mask)
 {
-	struct smp_call_struct data;
-	unsigned long timeout;
-	int num_cpus_to_call;
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	data.wait = wait;
-
-	cpu_clear(smp_processor_id(), to_whom);
-	num_cpus_to_call = cpus_weight(to_whom);
-
-	atomic_set(&data.unstarted_count, num_cpus_to_call);
-	atomic_set(&data.unfinished_count, num_cpus_to_call);
-
-	/* Acquire the smp_call_function_data mutex.  */
-	if (pointer_lock(&smp_call_function_data, &data, retry))
-		return -EBUSY;
-
-	/* Send a message to the requested CPUs.  */
-	send_ipi_message(to_whom, IPI_CALL_FUNC);
-
-	/* Wait for a minimal response.  */
-	timeout = jiffies + HZ;
-	while (atomic_read (&data.unstarted_count) > 0
-	       && time_before (jiffies, timeout))
-		barrier();
-
-	/* If there's no response yet, log a message but allow a longer
-	 * timeout period -- if we get a response this time, log
-	 * a message saying when we got it..
-	 */
-	if (atomic_read(&data.unstarted_count) > 0) {
-		long start_time = jiffies;
-		printk(KERN_ERR "%s: initial timeout -- trying long wait\n",
-		       __func__);
-		timeout = jiffies + 30 * HZ;
-		while (atomic_read(&data.unstarted_count) > 0
-		       && time_before(jiffies, timeout))
-			barrier();
-		if (atomic_read(&data.unstarted_count) <= 0) {
-			long delta = jiffies - start_time;
-			printk(KERN_ERR
-			       "%s: response %ld.%ld seconds into long wait\n",
-			       __func__, delta / HZ,
-			       (100 * (delta - ((delta / HZ) * HZ))) / HZ);
-		}
-	}
-
-	/* We either got one or timed out -- clear the lock. */
-	mb();
-	smp_call_function_data = NULL;
-
-	/*
-	 * If after both the initial and long timeout periods we still don't
-	 * have a response, something is very wrong...
-	 */
-	BUG_ON(atomic_read (&data.unstarted_count) > 0);
-
-	/* Wait for a complete response, if needed.  */
-	if (wait) {
-		while (atomic_read (&data.unfinished_count) > 0)
-			barrier();
-	}
-
-	return 0;
+	send_ipi_message(mask, IPI_CALL_FUNC);
 }
-EXPORT_SYMBOL(smp_call_function_on_cpu);
 
-int
-smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
+void arch_send_call_function_single_ipi(int cpu)
 {
-	return smp_call_function_on_cpu (func, info, retry, wait,
-					 cpu_online_map);
+	send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
 }
-EXPORT_SYMBOL(smp_call_function);
 
 static void
 ipi_imb(void *ignored)
...
@@ -47,7 +47,8 @@ extern struct cpuinfo_alpha cpu_data[NR_CPUS];
 extern int smp_num_cpus;
 #define cpu_possible_map	cpu_present_map
 
-int smp_call_function_on_cpu(void (*func) (void *info), void *info, int retry, int wait, cpumask_t cpu);
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
 
 #else /* CONFIG_SMP */
...
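With USE_GENERIC_SMP_HELPERS selected, smp_call_function() and smp_call_function_single() themselves now come from the generic kernel/smp.c implementation; the architecture only supplies the two arch_send_call_function*() hooks declared above and dispatches the two IPI types back into the generic layer from handle_ipi(). As a sketch of the broadcast side under the same era's four-argument signature (flush_example() and its caller are hypothetical, not part of this commit):

	#include <linux/smp.h>

	/* Runs on every other online CPU, delivered via IPI_CALL_FUNC. */
	static void flush_example(void *unused)
	{
	}

	static void broadcast_example(void)
	{
		/* Era signature: (func, info, retry, wait); wait=1 blocks
		   until flush_example() has finished on all other CPUs. */
		smp_call_function(flush_example, NULL, 1, 1);
	}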