Commit dbcf4787 authored by Jens Axboe

parisc: convert to generic helpers for IPI function calls

This converts parisc to use the new helpers for smp_call_function() and
friends, and adds support for smp_call_function_single(). Tested by
Kyle, seems to work.

Cc: Matthew Wilcox <matthew@wil.cx>
Cc: Grant Grundler <grundler@parisc-linux.org>
Signed-off-by: Kyle McMartin <kyle@mcmartin.ca>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Parent 2f304c0a
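
For context, the generic helpers live in kernel/smp.c and give every architecture the same caller-facing cross-call API. The sketch below is a hypothetical caller-side illustration, not part of this commit: it assumes the post-conversion prototypes smp_call_function(func, info, wait) and smp_call_function_single(cpu, func, info, wait), while trees from this era may still carry an extra retry/nonatomic argument, and flush_local_counters()/example_cross_calls() are made-up names.

#include <linux/smp.h>
#include <linux/kernel.h>

/* Runs in IPI (interrupt) context on the target CPU: must be fast and
 * must not sleep. */
static void flush_local_counters(void *info)
{
	unsigned long *hint = info;	/* illustrative payload */

	pr_debug("CPU%d: flushing counters (hint=%lu)\n",
		 smp_processor_id(), *hint);
}

static void example_cross_calls(void)
{
	unsigned long hint = 42;

	/* Run the function on every other online CPU and wait for all of
	 * them to finish -- the classic smp_call_function() use. */
	smp_call_function(flush_local_counters, &hint, 1);

	/* New with this series: target one CPU directly instead of
	 * broadcasting to everybody. */
	smp_call_function_single(0, flush_local_counters, &hint, 1);
}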
@@ -199,6 +199,7 @@ endchoice
 
 config SMP
 	bool "Symmetric multi-processing support"
+	select USE_GENERIC_SMP_HELPERS
 	---help---
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
......
@@ -84,19 +84,11 @@ EXPORT_SYMBOL(cpu_possible_map);
 
 DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED;
 
-struct smp_call_struct {
-	void (*func) (void *info);
-	void *info;
-	long wait;
-	atomic_t unstarted_count;
-	atomic_t unfinished_count;
-};
-static volatile struct smp_call_struct *smp_call_function_data;
-
 enum ipi_message_type {
 	IPI_NOP=0,
 	IPI_RESCHEDULE=1,
 	IPI_CALL_FUNC,
+	IPI_CALL_FUNC_SINGLE,
 	IPI_CPU_START,
 	IPI_CPU_STOP,
 	IPI_CPU_TEST
@@ -187,33 +179,12 @@ ipi_interrupt(int irq, void *dev_id)
 
 			case IPI_CALL_FUNC:
 				smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu);
-				{
-					volatile struct smp_call_struct *data;
-					void (*func)(void *info);
-					void *info;
-					int wait;
-
-					data = smp_call_function_data;
-					func = data->func;
-					info = data->info;
-					wait = data->wait;
-
-					mb();
-					atomic_dec ((atomic_t *)&data->unstarted_count);
-
-					/* At this point, *data can't
-					 * be relied upon.
-					 */
-
-					(*func)(info);
-
-					/* Notify the sending CPU that the
-					 * task is done.
-					 */
-					mb();
-					if (wait)
-						atomic_dec ((atomic_t *)&data->unfinished_count);
-				}
+				generic_smp_call_function_interrupt();
+				break;
+
+			case IPI_CALL_FUNC_SINGLE:
+				smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC_SINGLE\n", this_cpu);
+				generic_smp_call_function_single_interrupt();
 				break;
 
 			case IPI_CPU_START:
@@ -256,6 +227,14 @@ ipi_send(int cpu, enum ipi_message_type op)
 	spin_unlock_irqrestore(lock, flags);
 }
 
+static void
+send_IPI_mask(cpumask_t mask, enum ipi_message_type op)
+{
+	int cpu;
+
+	for_each_cpu_mask(cpu, mask)
+		ipi_send(cpu, op);
+}
 
 static inline void
 send_IPI_single(int dest_cpu, enum ipi_message_type op)
@@ -295,86 +274,15 @@ smp_send_all_nop(void)
 	send_IPI_allbutself(IPI_NOP);
 }
 
-
-/**
- * Run a function on all other CPUs.
- *  <func>	The function to run. This must be fast and non-blocking.
- *  <info>	An arbitrary pointer to pass to the function.
- *  <retry>	If true, keep retrying until ready.
- *  <wait>	If true, wait until function has completed on other CPUs.
- *  [RETURNS]	0 on success, else a negative status code.
- *
- * Does not return until remote CPUs are nearly ready to execute <func>
- * or have executed.
- */
-
-int
-smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
+void arch_send_call_function_ipi(cpumask_t mask)
 {
-	struct smp_call_struct data;
-	unsigned long timeout;
-	static DEFINE_SPINLOCK(lock);
-	int retries = 0;
-
-	if (num_online_cpus() < 2)
-		return 0;
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	/* can also deadlock if IPIs are disabled */
-	WARN_ON((get_eiem() & (1UL<<(CPU_IRQ_MAX - IPI_IRQ))) == 0);
-
-	data.func = func;
-	data.info = info;
-	data.wait = wait;
-	atomic_set(&data.unstarted_count, num_online_cpus() - 1);
-	atomic_set(&data.unfinished_count, num_online_cpus() - 1);
-
-	if (retry) {
-		spin_lock (&lock);
-		while (smp_call_function_data != 0)
-			barrier();
-	}
-	else {
-		spin_lock (&lock);
-		if (smp_call_function_data) {
-			spin_unlock (&lock);
-			return -EBUSY;
-		}
-	}
-
-	smp_call_function_data = &data;
-	spin_unlock (&lock);
-
-	/* Send a message to all other CPUs and wait for them to respond */
-	send_IPI_allbutself(IPI_CALL_FUNC);
-
- retry:
-	/* Wait for response */
-	timeout = jiffies + HZ;
-	while ( (atomic_read (&data.unstarted_count) > 0) &&
-		time_before (jiffies, timeout) )
-		barrier ();
-
-	if (atomic_read (&data.unstarted_count) > 0) {
-		printk(KERN_CRIT "SMP CALL FUNCTION TIMED OUT! (cpu=%d), try %d\n",
-		      smp_processor_id(), ++retries);
-		goto retry;
-	}
-	/* We either got one or timed out. Release the lock */
-
-	mb();
-	smp_call_function_data = NULL;
-
-	while (wait && atomic_read (&data.unfinished_count) > 0)
-		barrier ();
-
-	return 0;
+	send_IPI_mask(mask, IPI_CALL_FUNC);
 }
 
-EXPORT_SYMBOL(smp_call_function);
+void arch_send_call_function_single_ipi(int cpu)
+{
+	send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
+}
 
 /*
  * Flush all other CPU's tlb and then mine.  Do this with on_each_cpu()
......
@@ -30,6 +30,9 @@ extern cpumask_t cpu_online_map;
 
 extern void smp_send_reschedule(int cpu);
 extern void smp_send_all_nop(void);
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
+
 #endif /* !ASSEMBLY */
 
 /*
......
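
To make the division of labour concrete, here is a deliberately tiny, single-threaded model: hypothetical standalone userspace C, not kernel/smp.c and not the parisc code, with names that mirror the kernel hooks but stand-in bodies. The generic layer queues the request and waits; the architecture's only jobs are to send the IPI and to call back into the generic handler from its interrupt path, which is exactly the shape of the ipi_interrupt() and arch_send_call_function_*() changes above.

/* toy_ipi_model.c -- hypothetical model of the generic/arch split. */
#include <stdio.h>

#define NR_CPUS_MODEL 2

struct call_req {
	void (*func)(void *info);
	void *info;
	int done;
};

static struct call_req *pending[NR_CPUS_MODEL];
static int model_cpu;		/* stand-in for smp_processor_id() */

/* "Generic layer": run the queued function on the interrupted CPU. */
static void generic_smp_call_function_single_interrupt(void)
{
	struct call_req *req = pending[model_cpu];

	req->func(req->info);
	req->done = 1;		/* lets the sender stop waiting */
}

/* "Arch hook" (what this commit adds for parisc): deliver the IPI.
 * The model delivers it inline instead of raising a real interrupt. */
static void arch_send_call_function_single_ipi(int cpu)
{
	model_cpu = cpu;
	generic_smp_call_function_single_interrupt();
}

/* "Generic layer": roughly what smp_call_function_single() boils down
 * to in this model -- queue, kick the arch, wait for completion. */
static void model_call_single(int cpu, void (*func)(void *), void *info)
{
	struct call_req req = { .func = func, .info = info, .done = 0 };

	pending[cpu] = &req;
	arch_send_call_function_single_ipi(cpu);
	while (!req.done)
		;		/* sender-side wait */
}

static void say_hello(void *info)
{
	printf("hello from CPU%d: %s\n", model_cpu, (const char *)info);
}

int main(void)
{
	model_call_single(1, say_hello, "single-CPU cross call");
	return 0;
}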