Commit 48a048fe authored by Rusty Russell

cpumask: arch_send_call_function_ipi_mask: mips

We're weaning the core code off handing cpumasks around on-stack.
This introduces arch_send_call_function_ipi_mask(); once an
architecture defines it, the old arch_send_call_function_ipi is
provided by the core code.
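
The new hook is detected through the self-referential #define added to asm/smp.h below. As a rough, illustrative sketch only (an assumption about the shape of the generic fallback, not the kernel's actual core code), the core can key off that macro like this:

/*
 * Illustrative sketch -- not the kernel's actual generic code.
 * The self-referential #define in the arch header acts as a
 * feature-test macro, so generic code can tell whether the
 * architecture provides the new pointer-taking hook and supply
 * the other variant itself.
 */
#ifdef arch_send_call_function_ipi_mask
/* Converted arch: derive the legacy by-value call from the new hook. */
#define arch_send_call_function_ipi(mask) \
	arch_send_call_function_ipi_mask(&(mask))
#else
/* Unconverted arch: synthesize the new hook from the old call. */
#define arch_send_call_function_ipi_mask(maskp) \
	arch_send_call_function_ipi(*(maskp))
#endif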

We also take the chance to wean the implementations off the
obsolescent for_each_cpu_mask(): making send_ipi_mask take a pointer
seemed the most natural way to ensure all implementations used
for_each_cpu. A sketch of the resulting pattern follows the commit
metadata below.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Parent c2a3a488
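
Every per-platform hunk below applies the same mechanical conversion. As a standalone sketch (example_send_ipi_mask and example_send_ipi_single are hypothetical names, not functions from this commit), the converted shape of a typical implementation is:

#include <linux/cpumask.h>

/* Hypothetical per-platform single-CPU IPI sender, for illustration. */
static void example_send_ipi_single(int cpu, unsigned int action)
{
	/* platform-specific poke of the target CPU would go here */
}

/*
 * The old form copied the whole cpumask_t by value and walked it with
 * the obsolescent for_each_cpu_mask(i, mask); the new form takes a
 * pointer and iterates with for_each_cpu(i, mask).
 */
static void example_send_ipi_mask(const struct cpumask *mask,
				  unsigned int action)
{
	unsigned int i;

	for_each_cpu(i, mask)
		example_send_ipi_single(i, action);
}

Passing a const struct cpumask * instead of a cpumask_t by value avoids copying NR_CPUS bits onto the stack, which is the point of the series.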
@@ -19,7 +19,7 @@ struct task_struct;
 
 struct plat_smp_ops {
 	void (*send_ipi_single)(int cpu, unsigned int action);
-	void (*send_ipi_mask)(cpumask_t mask, unsigned int action);
+	void (*send_ipi_mask)(const struct cpumask *mask, unsigned int action);
 	void (*init_secondary)(void);
 	void (*smp_finish)(void);
 	void (*cpus_done)(void);
@@ -78,6 +78,7 @@ extern void play_dead(void);
 extern asmlinkage void smp_call_function_interrupt(void);
 
 extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
 
 #endif /* __ASM_SMP_H */
@@ -80,11 +80,11 @@ void cmp_send_ipi_single(int cpu, unsigned int action)
 	local_irq_restore(flags);
 }
 
-static void cmp_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void cmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
 {
 	unsigned int i;
 
-	for_each_cpu_mask(i, mask)
+	for_each_cpu(i, mask)
 		cmp_send_ipi_single(i, action);
 }
 
@@ -141,11 +141,11 @@ static void vsmp_send_ipi_single(int cpu, unsigned int action)
 	local_irq_restore(flags);
 }
 
-static void vsmp_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void vsmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
 {
 	unsigned int i;
 
-	for_each_cpu_mask(i, mask)
+	for_each_cpu(i, mask)
 		vsmp_send_ipi_single(i, action);
 }
 
@@ -18,7 +18,8 @@ static void up_send_ipi_single(int cpu, unsigned int action)
 	panic(KERN_ERR "%s called", __func__);
 }
 
-static inline void up_send_ipi_mask(cpumask_t mask, unsigned int action)
+static inline void up_send_ipi_mask(const struct cpumask *mask,
+				    unsigned int action)
 {
 	panic(KERN_ERR "%s called", __func__);
 }
@@ -128,7 +128,7 @@ asmlinkage __cpuinit void start_secondary(void)
 	cpu_idle();
 }
 
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
 	mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
 }
@@ -43,11 +43,12 @@ static void ssmtc_send_ipi_single(int cpu, unsigned int action)
 	/* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
 }
 
-static inline void ssmtc_send_ipi_mask(cpumask_t mask, unsigned int action)
+static inline void ssmtc_send_ipi_mask(const struct cpumask *mask,
+				       unsigned int action)
 {
 	unsigned int i;
 
-	for_each_cpu_mask(i, mask)
+	for_each_cpu(i, mask)
 		ssmtc_send_ipi_single(i, action);
 }
 
@@ -21,11 +21,11 @@ static void msmtc_send_ipi_single(int cpu, unsigned int action)
 	smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
 }
 
-static void msmtc_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void msmtc_send_ipi_mask(const struct cpumask *mask, unsigned int action)
 {
 	unsigned int i;
 
-	for_each_cpu_mask(i, mask)
+	for_each_cpu(i, mask)
 		msmtc_send_ipi_single(i, action);
 }
 
@@ -97,11 +97,11 @@ static void yos_send_ipi_single(int cpu, unsigned int action)
 	}
 }
 
-static void yos_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void yos_send_ipi_mask(const struct cpumask *mask, unsigned int action)
 {
 	unsigned int i;
 
-	for_each_cpu_mask(i, mask)
+	for_each_cpu(i, mask)
 		yos_send_ipi_single(i, action);
 }
 
@@ -165,11 +165,11 @@ static void ip27_send_ipi_single(int destid, unsigned int action)
 	REMOTE_HUB_SEND_INTR(COMPACT_TO_NASID_NODEID(cpu_to_node(destid)), irq);
 }
 
-static void ip27_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void ip27_send_ipi(const struct cpumask *mask, unsigned int action)
 {
 	unsigned int i;
 
-	for_each_cpu_mask(i, mask)
+	for_each_cpu(i, mask)
 		ip27_send_ipi_single(i, action);
 }
 
@@ -82,11 +82,12 @@ static void bcm1480_send_ipi_single(int cpu, unsigned int action)
 	__raw_writeq((((u64)action)<< 48), mailbox_0_set_regs[cpu]);
 }
 
-static void bcm1480_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void bcm1480_send_ipi_mask(const struct cpumask *mask,
+				  unsigned int action)
 {
 	unsigned int i;
 
-	for_each_cpu_mask(i, mask)
+	for_each_cpu(i, mask)
 		bcm1480_send_ipi_single(i, action);
 }
 
@@ -70,11 +70,12 @@ static void sb1250_send_ipi_single(int cpu, unsigned int action)
 	__raw_writeq((((u64)action) << 48), mailbox_set_regs[cpu]);
 }
 
-static inline void sb1250_send_ipi_mask(cpumask_t mask, unsigned int action)
+static inline void sb1250_send_ipi_mask(const struct cpumask *mask,
+					unsigned int action)
 {
 	unsigned int i;
 
-	for_each_cpu_mask(i, mask)
+	for_each_cpu(i, mask)
 		sb1250_send_ipi_single(i, action);
 }
 