Commit d172ad18 authored by David S. Miller

sparc64: Convert to generic helpers for IPI function calls.

Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 4fe3ebec
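
For context: with USE_GENERIC_SMP_HELPERS selected, the cross-call bookkeeping sparc64 used to keep privately (call_data_struct, its spinlock, the "finished" counter) moves into the generic kernel/smp.c, and the architecture only supplies two IPI hooks plus the trap-level handlers that acknowledge them. A rough sketch of the resulting split, assuming the 2.6.26-era generic helper internals (a simplified paraphrase, not the actual kernel/smp.c source):

	/* Arch side (this patch): just kick the target CPUs. */
	void arch_send_call_function_ipi(cpumask_t mask)
	{
		smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);
	}

	/* Generic side (kernel/smp.c, heavily simplified): queue the
	 * request, send the IPI, optionally wait for completion. */
	int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
				   void *info, int wait)
	{
		struct call_function_data data;

		data.csd.func = func;
		data.csd.info = info;
		/* ...publish data on the global call-function queue... */
		arch_send_call_function_ipi(mask);
		if (wait) {
			/* ...spin until every target CPU has run func()... */
		}
		return 0;
	}

	/* Target CPU, entered from the PIL_SMP_CALL_FUNC softint: it now
	 * drains the generic queue instead of reading one call_data slot. */
	void smp_call_function_client(int irq, struct pt_regs *regs)
	{
		clear_softint(1 << irq);
		generic_smp_call_function_interrupt();
	}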
@@ -16,6 +16,7 @@ config SPARC64
 	select HAVE_IDE
 	select HAVE_LMB
 	select HAVE_ARCH_KGDB
+	select USE_GENERIC_SMP_HELPERS if SMP
 
 config GENERIC_TIME
 	bool
@@ -788,89 +788,36 @@ static void smp_start_sync_tick_client(int cpu)
 		      0, 0, 0, mask);
 }
 
+/* Send cross call to all processors except self. */
+#define smp_cross_call(func, ctx, data1, data2) \
+	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)
-struct call_data_struct {
-	void (*func) (void *info);
-	void *info;
-	atomic_t finished;
-	int wait;
-};
-
-static struct call_data_struct *call_data;
 
 extern unsigned long xcall_call_function;
 
-/**
- * smp_call_function(): Run a function on all other CPUs.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code. Does not return until
- * remote CPUs are nearly ready to execute <<func>> or are or have executed.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-static int sparc64_smp_call_function_mask(void (*func)(void *info), void *info,
-					  int wait, cpumask_t mask)
+void arch_send_call_function_ipi(cpumask_t mask)
 {
-	struct call_data_struct data;
-	int cpus;
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.finished, 0);
-	data.wait = wait;
-
-	spin_lock(&call_lock);
-	cpu_clear(smp_processor_id(), mask);
-	cpus = cpus_weight(mask);
-	if (!cpus)
-		goto out_unlock;
-
-	call_data = &data;
-	mb();
-
 	smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);
+}
 
-	/* Wait for response */
-	while (atomic_read(&data.finished) != cpus)
-		cpu_relax();
+extern unsigned long xcall_call_function_single;
 
-out_unlock:
-	spin_unlock(&call_lock);
+void arch_send_call_function_single_ipi(int cpu)
+{
+	cpumask_t mask = cpumask_of_cpu(cpu);
 
-	return 0;
+	smp_cross_call_masked(&xcall_call_function_single, 0, 0, 0, mask);
 }
 
-int smp_call_function(void (*func)(void *info), void *info, int wait)
-{
-	return sparc64_smp_call_function_mask(func, info, wait, cpu_online_map);
-}
-
-/* Send cross call to all processors except self. */
-#define smp_cross_call(func, ctx, data1, data2) \
-	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)
-
 void smp_call_function_client(int irq, struct pt_regs *regs)
 {
-	void (*func) (void *info) = call_data->func;
-	void *info = call_data->info;
+	clear_softint(1 << irq);
+	generic_smp_call_function_interrupt();
+}
 
+void smp_call_function_single_client(int irq, struct pt_regs *regs)
+{
 	clear_softint(1 << irq);
-
-	if (call_data->wait) {
-		/* let initiator proceed only after completion */
-		func(info);
-		atomic_inc(&call_data->finished);
-	} else {
-		/* let initiator proceed after getting data */
-		atomic_inc(&call_data->finished);
-		func(info);
-	}
+	generic_smp_call_function_single_interrupt();
 }
 
 static void tsb_sync(void *info)
@@ -890,7 +837,7 @@ static void tsb_sync(void *info)
 
 void smp_tsb_sync(struct mm_struct *mm)
 {
-	sparc64_smp_call_function_mask(tsb_sync, mm, 1, mm->cpu_vm_mask);
+	smp_call_function_mask(mm->cpu_vm_mask, tsb_sync, mm, 1);
 }
 
 extern unsigned long xcall_flush_tlb_mm;
@@ -108,8 +108,6 @@ EXPORT_SYMBOL(__read_unlock);
 EXPORT_SYMBOL(__write_lock);
 EXPORT_SYMBOL(__write_unlock);
 EXPORT_SYMBOL(__write_trylock);
-EXPORT_SYMBOL(smp_call_function);
-
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_MCOUNT
@@ -58,7 +58,12 @@ tl0_irq3:	BTRAP(0x43)
 tl0_irq4:	BTRAP(0x44)
 #endif
 tl0_irq5:	TRAP_IRQ(handler_irq, 5)
-tl0_irq6:	BTRAP(0x46) BTRAP(0x47) BTRAP(0x48) BTRAP(0x49)
+#ifdef CONFIG_SMP
+tl0_irq6:	TRAP_IRQ(smp_call_function_single_client, 6)
+#else
+tl0_irq6:	BTRAP(0x46)
+#endif
+tl0_irq7:	BTRAP(0x47) BTRAP(0x48) BTRAP(0x49)
 tl0_irq10:	BTRAP(0x4a) BTRAP(0x4b) BTRAP(0x4c) BTRAP(0x4d)
 tl0_irq14:	TRAP_IRQ(timer_interrupt, 14)
 tl0_irq15:	TRAP_IRQ(handler_irq, 15)
@@ -688,6 +688,11 @@ xcall_call_function:
 	wr	%g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
 	retry
 
+	.globl	xcall_call_function_single
+xcall_call_function_single:
+	wr	%g0, (1 << PIL_SMP_CALL_FUNC_SNGL), %set_softint
+	retry
+
 	.globl	xcall_receive_signal
 xcall_receive_signal:
 	wr	%g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
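Tying the ttable.S and ultra.S hunks above together with the PIL define added below (my reading of the diff, not part of the commit message): a software interrupt at level n traps through vector 0x40 + n, so the new PIL_SMP_CALL_FUNC_SNGL (6) arrives at trap vector 0x46, exactly the tl0_irq6 slot rewired above. The delivery path for a single-CPU cross call becomes:

	/*
	 * arch_send_call_function_single_ipi(cpu)
	 *   -> smp_cross_call_masked(&xcall_call_function_single, ...)
	 *      sends the xcall; the target CPU's handler executes
	 *        wr %g0, (1 << PIL_SMP_CALL_FUNC_SNGL), %set_softint
	 *      raising software interrupt level 6
	 *   -> trap vector 0x46 (tl0_irq6) dispatches via TRAP_IRQ(..., 6)
	 *   -> smp_call_function_single_client() clears the softint and
	 *   -> generic_smp_call_function_single_interrupt() runs the
	 *      queued function on this CPU.
	 */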
@@ -17,5 +17,6 @@
 #define PIL_SMP_CAPTURE		3
 #define PIL_SMP_CTX_NEW_VERSION	4
 #define PIL_DEVICE_IRQ		5
+#define PIL_SMP_CALL_FUNC_SNGL	6
 
 #endif /* !(_SPARC64_PIL_H) */
@@ -34,6 +34,9 @@ DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 extern cpumask_t cpu_core_map[NR_CPUS];
 extern int sparc64_multi_core;
 
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
+
 /*
  * General functions that each host system must provide.
  */
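The caller-visible effect, grounded in the smp.c and ksyms hunks above: smp_call_function() itself now comes from kernel/smp.c, which exports the symbol, so the sparc64 EXPORT_SYMBOL is dropped; in-arch users switch from the private helper to the generic mask variant, whose argument order puts the mask first, as in smp_tsb_sync():

	/* Old, sparc64-private helper (removed above): func first. */
	sparc64_smp_call_function_mask(tsb_sync, mm, 1, mm->cpu_vm_mask);

	/* New, generic helper from kernel/smp.c: mask first. */
	smp_call_function_mask(mm->cpu_vm_mask, tsb_sync, mm, 1);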