Commit c6d2b22e authored by David Daney, committed by Ralf Baechle

MIPS: OCTEON: Add SMP support for OCTEON cn78xx et al.

OCTEON chips with the CIU3 interrupt controller use a different IPI
mechanism than previous models.

Add plat_smp_ops for the cn78xx and probing code to choose between the
two types of ops.
Signed-off-by: David Daney <david.daney@cavium.com>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/12499/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Parent ce210d35
......@@ -43,8 +43,6 @@
#include <asm/octeon/cvmx-mio-defs.h>
#include <asm/octeon/cvmx-rst-defs.h>
extern struct plat_smp_ops octeon_smp_ops;
#ifdef CONFIG_PCI
extern void pci_console_init(const char *arg);
#endif
......@@ -888,7 +886,7 @@ void __init prom_init(void)
#endif
octeon_user_io_init();
register_smp_ops(&octeon_smp_ops);
octeon_setup_smp();
}
/* Exclude a single page from the regions obtained in plat_mem_setup. */
......
......@@ -30,25 +30,55 @@ uint64_t octeon_bootloader_entry_addr;
EXPORT_SYMBOL(octeon_bootloader_entry_addr);
#endif
/*
 * Flush the local core's instruction cache.  "synci 0($0)" synchronizes
 * the icache with prior writes; used as the SMP_ICACHE_FLUSH IPI action.
 */
static void octeon_icache_flush(void)
{
asm volatile ("synci 0($0)\n");
}
/*
 * IPI dispatch table: slot i handles mailbox bit (1 << i).  The slot
 * order is pinned to the SMP_* constants by BUILD_BUG_ON checks in
 * mailbox_interrupt().  Unlisted slots stay NULL and are skipped.
 */
static void (*octeon_message_functions[8])(void) = {
scheduler_ipi,	/* bit 0: SMP_RESCHEDULE_YOURSELF */
generic_smp_call_function_interrupt,	/* bit 1: SMP_CALL_FUNCTION */
octeon_icache_flush,	/* bit 2: SMP_ICACHE_FLUSH */
};
/*
 * Mailbox IRQ handler for pre-CIU3 OCTEON models.
 *
 * Reads the per-core CIU mailbox register, acknowledges the pending
 * bits by writing them back to the clear register, then dispatches one
 * handler per set bit via octeon_message_functions[].
 *
 * NOTE(review): the scraped diff had old and new hunk lines interleaved
 * (duplicate declarations of 'action', a second mailbox read/clear and
 * the old hard-coded dispatch).  This is the coherent post-patch form:
 * only the table-driven dispatch remains.
 *
 * @irq:    IRQ number (unused).
 * @dev_id: cookie passed at request time (unused).
 * Returns IRQ_HANDLED unconditionally.
 */
static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
{
	u64 mbox_clrx = CVMX_CIU_MBOX_CLRX(cvmx_get_core_num());
	u64 action;
	int i;

	/*
	 * Make sure the function array initialization remains
	 * correct.
	 */
	BUILD_BUG_ON(SMP_RESCHEDULE_YOURSELF != (1 << 0));
	BUILD_BUG_ON(SMP_CALL_FUNCTION != (1 << 1));
	BUILD_BUG_ON(SMP_ICACHE_FLUSH != (1 << 2));

	/*
	 * Load the mailbox register to figure out what we're supposed
	 * to do.
	 */
	action = cvmx_read_csr(mbox_clrx);

	/* CN68XX exposes only 8 mailbox bits; other models 16. */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		action &= 0xff;
	else
		action &= 0xffff;

	/* Clear the mailbox to clear the interrupt */
	cvmx_write_csr(mbox_clrx, action);

	/* Dispatch each pending bit; stop early once action is empty. */
	for (i = 0; i < ARRAY_SIZE(octeon_message_functions) && action;) {
		if (action & 1) {
			void (*fn)(void) = octeon_message_functions[i];

			if (fn)
				fn();
		}
		action >>= 1;
		i++;
	}
	return IRQ_HANDLED;
}
......@@ -102,10 +132,10 @@ static void octeon_smp_setup(void)
const int coreid = cvmx_get_core_num();
int cpus;
int id;
int core_mask = octeon_get_boot_coremask();
struct cvmx_sysinfo *sysinfo = cvmx_sysinfo_get();
#ifdef CONFIG_HOTPLUG_CPU
int core_mask = octeon_get_boot_coremask();
unsigned int num_cores = cvmx_octeon_num_cores();
#endif
......@@ -390,3 +420,92 @@ struct plat_smp_ops octeon_smp_ops = {
.cpu_die = octeon_cpu_die,
#endif
};
/* CIU3 mailbox 0: reschedule IPI — hand off to the scheduler. */
static irqreturn_t octeon_78xx_reched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();

	return IRQ_HANDLED;
}
/* CIU3 mailbox 1: cross-CPU function-call IPI. */
static irqreturn_t octeon_78xx_call_function_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_interrupt();

	return IRQ_HANDLED;
}
/* CIU3 mailbox 2: icache-flush IPI — sync this core's icache. */
static irqreturn_t octeon_78xx_icache_flush_interrupt(int irq, void *dev_id)
{
	octeon_icache_flush();

	return IRQ_HANDLED;
}
/*
* Callout to firmware before smp_init
*/
static void octeon_78xx_prepare_cpus(unsigned int max_cpus)
{
if (request_irq(OCTEON_IRQ_MBOX0 + 0,
octeon_78xx_reched_interrupt,
IRQF_PERCPU | IRQF_NO_THREAD, "Scheduler",
octeon_78xx_reched_interrupt)) {
panic("Cannot request_irq for SchedulerIPI");
}
if (request_irq(OCTEON_IRQ_MBOX0 + 1,
octeon_78xx_call_function_interrupt,
IRQF_PERCPU | IRQF_NO_THREAD, "SMP-Call",
octeon_78xx_call_function_interrupt)) {
panic("Cannot request_irq for SMP-Call");
}
if (request_irq(OCTEON_IRQ_MBOX0 + 2,
octeon_78xx_icache_flush_interrupt,
IRQF_PERCPU | IRQF_NO_THREAD, "ICache-Flush",
octeon_78xx_icache_flush_interrupt)) {
panic("Cannot request_irq for ICache-Flush");
}
}
/*
 * Deliver an IPI to one CPU: each set bit in @action (low 8 bits)
 * rings the correspondingly numbered CIU3 mailbox on that CPU.
 */
static void octeon_78xx_send_ipi_single(int cpu, unsigned int action)
{
	unsigned int bit;

	for (bit = 0; bit < 8; bit++) {
		if (action & (1u << bit))
			octeon_ciu3_mbox_send(cpu, bit);
	}
}
/* Fan an IPI out to every CPU present in @mask. */
static void octeon_78xx_send_ipi_mask(const struct cpumask *mask,
				      unsigned int action)
{
	unsigned int target;

	for_each_cpu(target, mask)
		octeon_78xx_send_ipi_single(target, action);
}
/*
 * SMP ops for CIU3-based parts (cn78xx et al.).  Only the IPI send
 * paths and prepare_cpus differ from octeon_smp_ops; secondary-CPU
 * bring-up and hotplug hooks are shared with the older models.
 */
static struct plat_smp_ops octeon_78xx_smp_ops = {
.send_ipi_single = octeon_78xx_send_ipi_single,	/* CIU3 mailbox IPI */
.send_ipi_mask = octeon_78xx_send_ipi_mask,
.init_secondary = octeon_init_secondary,	/* shared with legacy ops */
.smp_finish = octeon_smp_finish,
.boot_secondary = octeon_boot_secondary,
.smp_setup = octeon_smp_setup,
.prepare_cpus = octeon_78xx_prepare_cpus,	/* registers mailbox IRQs */
#ifdef CONFIG_HOTPLUG_CPU
.cpu_disable = octeon_cpu_disable,
.cpu_die = octeon_cpu_die,
#endif
};
/*
 * Probe which IPI mechanism this chip uses and register the matching
 * plat_smp_ops: CIU3 parts (cn78xx et al.) get the mailbox-per-IPI
 * variant, everything else the legacy CIU ops.
 */
void __init octeon_setup_smp(void)
{
	register_smp_ops(octeon_has_feature(OCTEON_FEATURE_CIU3) ?
			 &octeon_78xx_smp_ops : &octeon_smp_ops);
}
......@@ -299,6 +299,12 @@ static inline void octeon_npi_write32(uint64_t address, uint32_t val)
cvmx_read64_uint32(address ^ 4);
}
#ifdef CONFIG_SMP
void octeon_setup_smp(void);
#else
static inline void octeon_setup_smp(void) {}
#endif
struct irq_domain;
struct device_node;
struct irq_data;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册