Commit 25475030 authored by Olof Johansson

Merge tag 'renesas-smp-for-v3.12' of git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas into next/renesas

From Simon Horman:
Renesas ARM based SoC SMP updates for v3.12

* Per-CPU SMP boot and sleep code on SoCs that use SCU
* Shared SCU CPU Hotplug code on r8a7779 and sh73a0 SoCs
* Shared SCU CPU boot code on emev2, r8a7779 and sh73a0 SoCs

* tag 'renesas-smp-for-v3.12' of git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas:
  ARM: shmobile: Per-CPU SMP boot / sleep code for SCU SoCs
  ARM: shmobile: Introduce per-CPU SMP boot / sleep code
  ARM: shmobile: Use shared SCU CPU Hotplug code on r8a7779
  ARM: shmobile: Use shared SCU CPU Hotplug code on sh73a0
  ARM: shmobile: Add shared SCU CPU Hotplug code
  ARM: shmobile: Use shared SCU SMP boot code on emev2
  ARM: shmobile: Use shared SCU SMP boot code on r8a7779
  ARM: shmobile: Use shared SCU SMP boot code on sh73a0
  ARM: shmobile: Introduce shared SCU SMP boot code
Signed-off-by: Olof Johansson <olof@lixom.net>
arch/arm/mach-shmobile/Makefile
@@ -17,9 +17,9 @@ obj-$(CONFIG_ARCH_EMEV2)    += setup-emev2.o clock-emev2.o
 
 # SMP objects
 smp-y                           := platsmp.o headsmp.o
-smp-$(CONFIG_ARCH_SH73A0)       += smp-sh73a0.o headsmp-scu.o
-smp-$(CONFIG_ARCH_R8A7779)      += smp-r8a7779.o headsmp-scu.o
-smp-$(CONFIG_ARCH_EMEV2)        += smp-emev2.o headsmp-scu.o
+smp-$(CONFIG_ARCH_SH73A0)       += smp-sh73a0.o headsmp-scu.o platsmp-scu.o
+smp-$(CONFIG_ARCH_R8A7779)      += smp-r8a7779.o headsmp-scu.o platsmp-scu.o
+smp-$(CONFIG_ARCH_EMEV2)        += smp-emev2.o headsmp-scu.o platsmp-scu.o
 
 # IRQ objects
 obj-$(CONFIG_ARCH_SH7372)       += entry-intc.o
arch/arm/mach-shmobile/headsmp.S
@@ -36,3 +36,52 @@ shmobile_boot_fn:
         .globl  shmobile_boot_arg
 shmobile_boot_arg:
 2:      .space  4
+
+/*
+ * Per-CPU SMP boot function/argument selection code based on MPIDR
+ */
+
+ENTRY(shmobile_smp_boot)
+                                                @ r0 = MPIDR_HWID_BITMASK
+        mrc     p15, 0, r1, c0, c0, 5           @ r1 = MPIDR
+        and     r0, r1, r0                      @ r0 = cpu_logical_map() value
+        mov     r1, #0                          @ r1 = CPU index
+        adr     r5, 1f                          @ array of per-cpu mpidr values
+        adr     r6, 2f                          @ array of per-cpu functions
+        adr     r7, 3f                          @ array of per-cpu arguments
+
+shmobile_smp_boot_find_mpidr:
+        ldr     r8, [r5, r1, lsl #2]
+        cmp     r8, r0
+        bne     shmobile_smp_boot_next
+
+        ldr     r9, [r6, r1, lsl #2]
+        cmp     r9, #0
+        bne     shmobile_smp_boot_found
+
+shmobile_smp_boot_next:
+        add     r1, r1, #1
+        cmp     r1, #CONFIG_NR_CPUS
+        blo     shmobile_smp_boot_find_mpidr
+
+        b       shmobile_smp_sleep
+ENDPROC(shmobile_smp_boot)
+
+shmobile_smp_boot_found:
+        ldr     r0, [r7, r1, lsl #2]
+        mov     pc, r9
+
+ENTRY(shmobile_smp_sleep)
+        wfi
+        b       shmobile_smp_boot
+ENDPROC(shmobile_smp_sleep)
+
+        .globl  shmobile_smp_mpidr
+shmobile_smp_mpidr:
+1:      .space  CONFIG_NR_CPUS * 4
+
+        .globl  shmobile_smp_fn
+shmobile_smp_fn:
+2:      .space  CONFIG_NR_CPUS * 4
+
+        .globl  shmobile_smp_arg
+shmobile_smp_arg:
+3:      .space  CONFIG_NR_CPUS * 4
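
In C terms, the table lookup above amounts to the sketch below. This is an illustration only: the real code runs as the assembly shown, before a C environment exists, the indirect jump never returns, and SKETCH_NR_CPUS here merely stands in for CONFIG_NR_CPUS.

/*
 * C sketch of the shmobile_smp_boot / shmobile_smp_sleep pair above.
 * Illustration only, not part of the patch.
 */
#define SKETCH_NR_CPUS 4        /* stands in for CONFIG_NR_CPUS */

extern unsigned long shmobile_smp_mpidr[SKETCH_NR_CPUS];        /* label 1: above */
extern unsigned long shmobile_smp_fn[SKETCH_NR_CPUS];           /* label 2: above */
extern unsigned long shmobile_smp_arg[SKETCH_NR_CPUS];          /* label 3: above */

static void smp_boot_sketch(unsigned long mpidr)
{
        unsigned int i;

        for (;;) {
                for (i = 0; i < SKETCH_NR_CPUS; i++) {
                        /* entry matches this CPU's MPIDR and has a function registered? */
                        if (shmobile_smp_mpidr[i] == mpidr && shmobile_smp_fn[i]) {
                                void (*fn)(unsigned long) =
                                        (void (*)(unsigned long))shmobile_smp_fn[i];
                                fn(shmobile_smp_arg[i]);        /* "mov pc, r9" */
                        }
                }
                __asm__ volatile("wfi");        /* shmobile_smp_sleep: wait, then rescan */
        }
}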
arch/arm/mach-shmobile/include/mach/common.h
@@ -9,7 +9,16 @@ extern void shmobile_setup_console(void);
 extern void shmobile_boot_vector(void);
 extern unsigned long shmobile_boot_fn;
 extern unsigned long shmobile_boot_arg;
+extern void shmobile_smp_boot(void);
+extern void shmobile_smp_sleep(void);
+extern void shmobile_smp_hook(unsigned int cpu, unsigned long fn,
+                              unsigned long arg);
 extern void shmobile_boot_scu(void);
+extern void shmobile_smp_scu_prepare_cpus(unsigned int max_cpus);
+extern int shmobile_smp_scu_boot_secondary(unsigned int cpu,
+                                           struct task_struct *idle);
+extern void shmobile_smp_scu_cpu_die(unsigned int cpu);
+extern int shmobile_smp_scu_cpu_kill(unsigned int cpu);
 struct clk;
 extern int shmobile_clk_init(void);
 extern void shmobile_handle_irq_intc(struct pt_regs *);
arch/arm/mach-shmobile/platsmp-scu.c (new file)
/*
 * SMP support for SoCs with SCU covered by mach-shmobile
 *
 * Copyright (C) 2013 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/smp.h>
#include <asm/cacheflush.h>
#include <asm/smp_plat.h>
#include <asm/smp_scu.h>
#include <mach/common.h>

void __init shmobile_smp_scu_prepare_cpus(unsigned int max_cpus)
{
        /* install boot code shared by all CPUs */
        shmobile_boot_fn = virt_to_phys(shmobile_smp_boot);
        shmobile_boot_arg = MPIDR_HWID_BITMASK;

        /* enable SCU and cache coherency on booting CPU */
        scu_enable(shmobile_scu_base);
        scu_power_mode(shmobile_scu_base, SCU_PM_NORMAL);
}

int shmobile_smp_scu_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
        /* For this particular CPU register SCU boot vector */
        shmobile_smp_hook(cpu, virt_to_phys(shmobile_boot_scu),
                          (unsigned long)shmobile_scu_base);
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
void shmobile_smp_scu_cpu_die(unsigned int cpu)
{
        /* For this particular CPU deregister boot vector */
        shmobile_smp_hook(cpu, 0, 0);

        dsb();
        flush_cache_all();

        /* disable cache coherency */
        scu_power_mode(shmobile_scu_base, SCU_PM_POWEROFF);

        /* jump to shared mach-shmobile sleep / reset code */
        shmobile_smp_sleep();
}

static int shmobile_smp_scu_psr_core_disabled(int cpu)
{
        unsigned long mask = SCU_PM_POWEROFF << (cpu * 8);

        if ((__raw_readl(shmobile_scu_base + 8) & mask) == mask)
                return 1;

        return 0;
}

int shmobile_smp_scu_cpu_kill(unsigned int cpu)
{
        int k;

        /* this function is running on another CPU than the offline target,
         * here we need wait for shutdown code in platform_cpu_die() to
         * finish before asking SoC-specific code to power off the CPU core.
         */
        for (k = 0; k < 1000; k++) {
                if (shmobile_smp_scu_psr_core_disabled(cpu))
                        return 1;

                mdelay(1);
        }

        return 0;
}
#endif
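
Each SoC is expected to point its struct smp_operations at these helpers. A minimal sketch of that wiring follows; the soc_* names and SOC_SCU_BASE are placeholders, not part of this series, and the real instances appear in the smp-emev2.c, smp-r8a7779.c and smp-sh73a0.c hunks below.

/* Illustrative wiring only; soc_* symbols and SOC_SCU_BASE are placeholders. */
static void __init soc_smp_prepare_cpus(unsigned int max_cpus)
{
        shmobile_scu_base = IOMEM(SOC_SCU_BASE);        /* SoC-specific SCU address */
        shmobile_smp_scu_prepare_cpus(max_cpus);        /* install shmobile_smp_boot, enable SCU */
}

static int soc_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
        int ret = shmobile_smp_scu_boot_secondary(cpu, idle);   /* register SCU boot vector */

        if (ret)
                return ret;

        /* SoC-specific wakeup (IPI, power-up register write, ...) goes here */
        return 0;
}

struct smp_operations soc_smp_ops __initdata = {
        .smp_prepare_cpus       = soc_smp_prepare_cpus,
        .smp_boot_secondary     = soc_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
        .cpu_die                = shmobile_smp_scu_cpu_die,
        .cpu_kill               = shmobile_smp_scu_cpu_kill,
#endif
};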
arch/arm/mach-shmobile/platsmp.c
@@ -12,6 +12,9 @@
  */
 #include <linux/init.h>
 #include <linux/smp.h>
+#include <asm/cacheflush.h>
+#include <asm/smp_plat.h>
+#include <mach/common.h>
 
 void __init shmobile_smp_init_cpus(unsigned int ncores)
 {
@@ -26,3 +29,18 @@ void __init shmobile_smp_init_cpus(unsigned int ncores)
         for (i = 0; i < ncores; i++)
                 set_cpu_possible(i, true);
 }
+
+extern unsigned long shmobile_smp_fn[];
+extern unsigned long shmobile_smp_arg[];
+extern unsigned long shmobile_smp_mpidr[];
+
+void shmobile_smp_hook(unsigned int cpu, unsigned long fn, unsigned long arg)
+{
+        shmobile_smp_fn[cpu] = 0;
+        flush_cache_all();
+
+        shmobile_smp_mpidr[cpu] = cpu_logical_map(cpu);
+        shmobile_smp_fn[cpu] = fn;
+        shmobile_smp_arg[cpu] = arg;
+        flush_cache_all();
+}
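
The ordering in shmobile_smp_hook() is deliberate: the function slot is cleared and flushed before the new MPIDR/function/argument triple is written and flushed again, so a secondary CPU scanning the table in shmobile_smp_boot does not act on a half-updated entry. A short usage sketch follows; my_boot_fn and my_arg are hypothetical names, not part of this series.

/* Register a boot function and argument for one CPU (hypothetical names). */
shmobile_smp_hook(cpu, virt_to_phys(my_boot_fn), my_arg);

/* Deregister: with fn == 0 the CPU falls through to shmobile_smp_sleep()
 * on its next pass through shmobile_smp_boot (this is what
 * shmobile_smp_scu_cpu_die() does). */
shmobile_smp_hook(cpu, 0, 0);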
arch/arm/mach-shmobile/smp-emev2.c
@@ -34,6 +34,12 @@
 static int emev2_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
+        int ret;
+
+        ret = shmobile_smp_scu_boot_secondary(cpu, idle);
+        if (ret)
+                return ret;
+
         arch_send_wakeup_ipi_mask(cpumask_of(cpu_logical_map(cpu)));
         return 0;
 }
@@ -42,21 +48,16 @@ static void __init emev2_smp_prepare_cpus(unsigned int max_cpus)
 {
         void __iomem *smu;
 
-        /* setup EMEV2 specific SCU base, enable */
-        shmobile_scu_base = ioremap(EMEV2_SCU_BASE, PAGE_SIZE);
-        scu_enable(shmobile_scu_base);
-
-        /* Tell ROM loader about our vector (in headsmp-scu.S, headsmp.S) */
+        /* Tell ROM loader about our vector (in headsmp.S) */
         smu = ioremap(EMEV2_SMU_BASE, PAGE_SIZE);
         if (smu) {
                 iowrite32(__pa(shmobile_boot_vector), smu + SMU_GENERAL_REG0);
                 iounmap(smu);
         }
-        shmobile_boot_fn = virt_to_phys(shmobile_boot_scu);
-        shmobile_boot_arg = (unsigned long)shmobile_scu_base;
 
-        /* enable cache coherency on booting CPU */
-        scu_power_mode(shmobile_scu_base, SCU_PM_NORMAL);
+        /* setup EMEV2 specific SCU bits */
+        shmobile_scu_base = ioremap(EMEV2_SCU_BASE, PAGE_SIZE);
+        shmobile_smp_scu_prepare_cpus(max_cpus);
 }
 
 struct smp_operations emev2_smp_ops __initdata = {
arch/arm/mach-shmobile/smp-r8a7779.c
@@ -84,33 +84,34 @@ static int r8a7779_platform_cpu_kill(unsigned int cpu)
 static int r8a7779_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
         struct r8a7779_pm_ch *ch = NULL;
-        int ret = -EIO;
+        unsigned int lcpu = cpu_logical_map(cpu);
+        int ret;
 
-        cpu = cpu_logical_map(cpu);
+        ret = shmobile_smp_scu_boot_secondary(cpu, idle);
+        if (ret)
+                return ret;
 
-        if (cpu < ARRAY_SIZE(r8a7779_ch_cpu))
-                ch = r8a7779_ch_cpu[cpu];
+        if (lcpu < ARRAY_SIZE(r8a7779_ch_cpu))
+                ch = r8a7779_ch_cpu[lcpu];
 
         if (ch)
                 ret = r8a7779_sysc_power_up(ch);
+        else
+                ret = -EIO;
 
         return ret;
 }
 
 static void __init r8a7779_smp_prepare_cpus(unsigned int max_cpus)
 {
-        /* setup r8a7779 specific SCU base */
-        shmobile_scu_base = IOMEM(R8A7779_SCU_BASE);
-        scu_enable(shmobile_scu_base);
-
         /* Map the reset vector (in headsmp-scu.S, headsmp.S) */
         __raw_writel(__pa(shmobile_boot_vector), AVECR);
         shmobile_boot_fn = virt_to_phys(shmobile_boot_scu);
         shmobile_boot_arg = (unsigned long)shmobile_scu_base;
 
-        /* enable cache coherency on booting CPU */
-        scu_power_mode(shmobile_scu_base, SCU_PM_NORMAL);
+        /* setup r8a7779 specific SCU bits */
+        shmobile_scu_base = IOMEM(R8A7779_SCU_BASE);
+        shmobile_smp_scu_prepare_cpus(max_cpus);
 
         r8a7779_pm_init();
@@ -121,47 +122,14 @@ static void __init r8a7779_smp_prepare_cpus(unsigned int max_cpus)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static int r8a7779_scu_psr_core_disabled(int cpu)
-{
-        unsigned long mask = 3 << (cpu * 8);
-
-        if ((__raw_readl(shmobile_scu_base + 8) & mask) == mask)
-                return 1;
-
-        return 0;
-}
-
 static int r8a7779_cpu_kill(unsigned int cpu)
 {
-        int k;
-
-        /* this function is running on another CPU than the offline target,
-         * here we need wait for shutdown code in platform_cpu_die() to
-         * finish before asking SoC-specific code to power off the CPU core.
-         */
-        for (k = 0; k < 1000; k++) {
-                if (r8a7779_scu_psr_core_disabled(cpu))
-                        return r8a7779_platform_cpu_kill(cpu);
-
-                mdelay(1);
-        }
+        if (shmobile_smp_scu_cpu_kill(cpu))
+                return r8a7779_platform_cpu_kill(cpu);
 
         return 0;
 }
 
-static void r8a7779_cpu_die(unsigned int cpu)
-{
-        dsb();
-        flush_cache_all();
-
-        /* disable cache coherency */
-        scu_power_mode(shmobile_scu_base, SCU_PM_POWEROFF);
-
-        /* Endless loop until power off from r8a7779_cpu_kill() */
-        while (1)
-                cpu_do_idle();
-}
-
 static int r8a7779_cpu_disable(unsigned int cpu)
 {
         /* only CPU1->3 have power domains, do not allow hotplug of CPU0 */
@@ -173,8 +141,8 @@ struct smp_operations r8a7779_smp_ops __initdata = {
         .smp_prepare_cpus       = r8a7779_smp_prepare_cpus,
         .smp_boot_secondary     = r8a7779_boot_secondary,
 #ifdef CONFIG_HOTPLUG_CPU
-        .cpu_kill               = r8a7779_cpu_kill,
-        .cpu_die                = r8a7779_cpu_die,
         .cpu_disable            = r8a7779_cpu_disable,
+        .cpu_die                = shmobile_smp_scu_cpu_die,
+        .cpu_kill               = r8a7779_cpu_kill,
 #endif
 };
arch/arm/mach-shmobile/smp-sh73a0.c
@@ -20,14 +20,11 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/smp.h>
-#include <linux/spinlock.h>
 #include <linux/io.h>
 #include <linux/delay.h>
 #include <mach/common.h>
-#include <asm/cacheflush.h>
-#include <asm/smp_plat.h>
 #include <mach/sh73a0.h>
-#include <asm/smp_scu.h>
+#include <asm/smp_plat.h>
 #include <asm/smp_twd.h>
 
 #define WUPCR           IOMEM(0xe6151010)
@@ -36,8 +33,6 @@
 #define SBAR            IOMEM(0xe6180020)
 #define APARMBAREA      IOMEM(0xe6f10020)
 
-#define PSTR_SHUTDOWN_MODE      3
-
 #define SH73A0_SCU_BASE 0xf0000000
 
 #ifdef CONFIG_HAVE_ARM_TWD
@@ -50,63 +45,33 @@ void __init sh73a0_register_twd(void)
 static int sh73a0_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
-        cpu = cpu_logical_map(cpu);
+        unsigned int lcpu = cpu_logical_map(cpu);
+        int ret;
 
-        if (((__raw_readl(PSTR) >> (4 * cpu)) & 3) == 3)
-                __raw_writel(1 << cpu, WUPCR);  /* wake up */
+        ret = shmobile_smp_scu_boot_secondary(cpu, idle);
+        if (ret)
+                return ret;
+
+        if (((__raw_readl(PSTR) >> (4 * lcpu)) & 3) == 3)
+                __raw_writel(1 << lcpu, WUPCR); /* wake up */
         else
-                __raw_writel(1 << cpu, SRESCR); /* reset */
+                __raw_writel(1 << lcpu, SRESCR);        /* reset */
 
         return 0;
 }
 
 static void __init sh73a0_smp_prepare_cpus(unsigned int max_cpus)
 {
-        /* setup sh73a0 specific SCU base */
-        shmobile_scu_base = IOMEM(SH73A0_SCU_BASE);
-        scu_enable(shmobile_scu_base);
-
-        /* Map the reset vector (in headsmp-scu.S, headsmp.S) */
+        /* Map the reset vector (in headsmp.S) */
         __raw_writel(0, APARMBAREA);      /* 4k */
         __raw_writel(__pa(shmobile_boot_vector), SBAR);
-        shmobile_boot_fn = virt_to_phys(shmobile_boot_scu);
-        shmobile_boot_arg = (unsigned long)shmobile_scu_base;
 
-        /* enable cache coherency on booting CPU */
-        scu_power_mode(shmobile_scu_base, SCU_PM_NORMAL);
+        /* setup sh73a0 specific SCU bits */
+        shmobile_scu_base = IOMEM(SH73A0_SCU_BASE);
+        shmobile_smp_scu_prepare_cpus(max_cpus);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static int sh73a0_cpu_kill(unsigned int cpu)
-{
-        int k;
-        u32 pstr;
-
-        /*
-         * wait until the power status register confirms the shutdown of the
-         * offline target
-         */
-        for (k = 0; k < 1000; k++) {
-                pstr = (__raw_readl(PSTR) >> (4 * cpu)) & 3;
-                if (pstr == PSTR_SHUTDOWN_MODE)
-                        return 1;
-
-                mdelay(1);
-        }
-
-        return 0;
-}
-
-static void sh73a0_cpu_die(unsigned int cpu)
-{
-        /* Set power off mode. This takes the CPU out of the MP cluster */
-        scu_power_mode(shmobile_scu_base, SCU_PM_POWEROFF);
-
-        /* Enter shutdown mode */
-        cpu_do_idle();
-}
-
 static int sh73a0_cpu_disable(unsigned int cpu)
 {
         return 0; /* CPU0 and CPU1 supported */
@@ -117,8 +82,8 @@ struct smp_operations sh73a0_smp_ops __initdata = {
         .smp_prepare_cpus       = sh73a0_smp_prepare_cpus,
         .smp_boot_secondary     = sh73a0_boot_secondary,
 #ifdef CONFIG_HOTPLUG_CPU
-        .cpu_kill               = sh73a0_cpu_kill,
-        .cpu_die                = sh73a0_cpu_die,
         .cpu_disable            = sh73a0_cpu_disable,
+        .cpu_die                = shmobile_smp_scu_cpu_die,
+        .cpu_kill               = shmobile_smp_scu_cpu_kill,
 #endif
 };