Commit 9327e2c6, authored by Mark Rutland, committed by Catalin Marinas

arm64: add CPU_HOTPLUG infrastructure

This patch adds the basic infrastructure necessary to support
CPU_HOTPLUG on arm64, based on the arm implementation. Actual hotplug
support will depend on an implementation's cpu_operations (e.g. PSCI).

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Parent: e8765b26
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -161,6 +161,13 @@ config NR_CPUS
 	default "8" if ARCH_XGENE
 	default "4"
 
+config HOTPLUG_CPU
+	bool "Support for hot-pluggable CPUs"
+	depends on SMP
+	help
+	  Say Y here to experiment with turning CPUs off and on.  CPUs
+	  can be controlled through /sys/devices/system/cpu.
+
 source kernel/Kconfig.preempt
 
 config HZ
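The help text above points at the standard sysfs control file. As a quick illustration of what "controlled through /sys/devices/system/cpu" means in practice, here is a small userspace sketch; the sysfs path is the kernel's standard hotplug interface, while the CPU number and the write_online() helper are made up for the example:

#include <stdio.h>
#include <stdlib.h>

/* Offline and re-online a CPU via sysfs (needs root). Writing "0" to
 * cpuN/online triggers the hot-unplug path added by this patch;
 * writing "1" brings the CPU back.
 */
static int write_online(int cpu, int online)
{
	char path[64];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/online", cpu);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return -1;
	}
	fprintf(f, "%d\n", online);
	return fclose(f);
}

int main(void)
{
	if (write_online(1, 0))		/* hot-unplug CPU1 */
		exit(EXIT_FAILURE);
	if (write_online(1, 1))		/* bring CPU1 back online */
		exit(EXIT_FAILURE);
	return 0;
}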
--- a/arch/arm64/include/asm/cpu_ops.h
+++ b/arch/arm64/include/asm/cpu_ops.h
@@ -34,6 +34,11 @@ struct device_node;
  * @cpu_boot: Boots a cpu into the kernel.
  * @cpu_postboot: Optionally, perform any post-boot cleanup or necessary
  *		synchronisation. Called from the cpu being booted.
+ * @cpu_disable: Prepares a cpu to die. May fail for some mechanism-specific
+ *		reason, which will cause the hot unplug to be aborted. Called
+ *		from the cpu to be killed.
+ * @cpu_die: Makes a cpu leave the kernel. Must not fail. Called from the
+ *		cpu being killed.
  */
 struct cpu_operations {
 	const char *name;
@@ -41,6 +46,10 @@ struct cpu_operations {
 	int (*cpu_prepare)(unsigned int);
 	int (*cpu_boot)(unsigned int);
 	void (*cpu_postboot)(void);
+#ifdef CONFIG_HOTPLUG_CPU
+	int (*cpu_disable)(unsigned int cpu);
+	void (*cpu_die)(unsigned int cpu);
+#endif
 };
 
 extern const struct cpu_operations *cpu_ops[NR_CPUS];
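For context, this is roughly what a backend registering against the new hooks could look like. Everything here is a hypothetical sketch: smp_example_ops, example_can_disable() and example_cpu_off() are invented stand-ins for a real mechanism (for instance, the firmware calls a PSCI backend would make):

/* Hypothetical enable-method backend; the example_* helpers are
 * placeholders for mechanism-specific (e.g. firmware) operations.
 */
static bool example_can_disable(unsigned int cpu);
static void example_cpu_off(unsigned int cpu);
static int example_cpu_prepare(unsigned int cpu);
static int example_cpu_boot(unsigned int cpu);

static int example_cpu_disable(unsigned int cpu)
{
	/* Failing here aborts the hot unplug before the point of no return. */
	if (!example_can_disable(cpu))
		return -EOPNOTSUPP;
	return 0;
}

static void example_cpu_die(unsigned int cpu)
{
	/* Must not return: hand the CPU back to the underlying mechanism. */
	example_cpu_off(cpu);
	while (1)
		cpu_relax();
}

const struct cpu_operations smp_example_ops = {
	.name		= "example",
	.cpu_prepare	= example_cpu_prepare,
	.cpu_boot	= example_cpu_boot,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable	= example_cpu_disable,
	.cpu_die	= example_cpu_die,
#endif
};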
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -4,6 +4,7 @@
 #include <asm-generic/irq.h>
 
 extern void (*handle_arch_irq)(struct pt_regs *);
+extern void migrate_irqs(void);
 extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
 
 #endif
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -65,4 +65,9 @@ extern void secondary_entry(void);
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
+extern int __cpu_disable(void);
+
+extern void __cpu_die(unsigned int cpu);
+extern void cpu_die(void);
+
 #endif /* ifndef __ASM_SMP_H */
--- a/arch/arm64/kernel/cputable.c
+++ b/arch/arm64/kernel/cputable.c
@@ -22,7 +22,7 @@
 
 extern unsigned long __cpu_setup(void);
 
-struct cpu_info __initdata cpu_table[] = {
+struct cpu_info cpu_table[] = {
 	{
 		.cpu_id_val	= 0x000f0000,
 		.cpu_id_mask	= 0x000f0000,
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -81,3 +81,64 @@ void __init init_IRQ(void)
 	if (!handle_arch_irq)
 		panic("No interrupt controller found.");
 }
+
+#ifdef CONFIG_HOTPLUG_CPU
+static bool migrate_one_irq(struct irq_desc *desc)
+{
+	struct irq_data *d = irq_desc_get_irq_data(desc);
+	const struct cpumask *affinity = d->affinity;
+	struct irq_chip *c;
+	bool ret = false;
+
+	/*
+	 * If this is a per-CPU interrupt, or the affinity does not
+	 * include this CPU, then we have nothing to do.
+	 */
+	if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
+		return false;
+
+	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+		affinity = cpu_online_mask;
+		ret = true;
+	}
+
+	c = irq_data_get_irq_chip(d);
+	if (!c->irq_set_affinity)
+		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
+	else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+		cpumask_copy(d->affinity, affinity);
+
+	return ret;
+}
+
+/*
+ * The current CPU has been marked offline.  Migrate IRQs off this CPU.
+ * If the affinity settings do not allow other CPUs, force them onto any
+ * available CPU.
+ *
+ * Note: we must iterate over all IRQs, whether they have an attached
+ * action structure or not, as we need to get chained interrupts too.
+ */
+void migrate_irqs(void)
+{
+	unsigned int i;
+	struct irq_desc *desc;
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	for_each_irq_desc(i, desc) {
+		bool affinity_broken;
+
+		raw_spin_lock(&desc->lock);
+		affinity_broken = migrate_one_irq(desc);
+		raw_spin_unlock(&desc->lock);
+
+		if (affinity_broken)
+			pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
+					    i, smp_processor_id());
+	}
+
+	local_irq_restore(flags);
+}
+#endif /* CONFIG_HOTPLUG_CPU */
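The core decision in migrate_one_irq() is plain mask arithmetic: once the dying CPU has been cleared from cpu_online_mask, any IRQ whose affinity no longer intersects the online mask gets forced onto all online CPUs. Here is a self-contained sketch of just that decision, with cpumasks reduced to 64-bit words and the mask values invented for the example:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the fallback logic in migrate_one_irq(): returns true when
 * the affinity was "broken", i.e. widened to every online CPU because
 * the old mask had no online CPU left in it.
 */
static bool pick_new_affinity(uint64_t *affinity, uint64_t online)
{
	if (*affinity & online)
		return false;	/* still has at least one valid target */
	*affinity = online;	/* empty intersection: use all online CPUs */
	return true;
}

int main(void)
{
	uint64_t online = 0x7;			/* CPUs 0-2; CPU3 just went offline */
	uint64_t affinity = UINT64_C(1) << 3;	/* IRQ was pinned to CPU3 */

	if (pick_new_affinity(&affinity, online))
		printf("IRQ no longer affine, new mask %#llx\n",
		       (unsigned long long)affinity);
	return 0;
}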
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -102,6 +102,13 @@ void arch_cpu_idle(void)
 	local_irq_enable();
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+void arch_cpu_idle_dead(void)
+{
+	cpu_die();
+}
+#endif
+
 void machine_shutdown(void)
 {
 #ifdef CONFIG_SMP
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -167,6 +167,102 @@ asmlinkage void secondary_start_kernel(void)
 	cpu_startup_entry(CPUHP_ONLINE);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+static int op_cpu_disable(unsigned int cpu)
+{
+	/*
+	 * If we don't have a cpu_die method, abort before we reach the point
+	 * of no return. CPU0 may not have a cpu_ops, so test for it.
+	 */
+	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
+		return -EOPNOTSUPP;
+
+	/*
+	 * We may need to abort a hot unplug for some other mechanism-specific
+	 * reason.
+	 */
+	if (cpu_ops[cpu]->cpu_disable)
+		return cpu_ops[cpu]->cpu_disable(cpu);
+
+	return 0;
+}
+
+/*
+ * __cpu_disable runs on the processor to be shutdown.
+ */
+int __cpu_disable(void)
+{
+	unsigned int cpu = smp_processor_id();
+	int ret;
+
+	ret = op_cpu_disable(cpu);
+	if (ret)
+		return ret;
+
+	/*
+	 * Take this CPU offline.  Once we clear this, we can't return,
+	 * and we must not schedule until we're ready to give up the cpu.
+	 */
+	set_cpu_online(cpu, false);
+
+	/*
+	 * OK - migrate IRQs away from this CPU
+	 */
+	migrate_irqs();
+
+	/*
+	 * Remove this CPU from the vm mask set of all processes.
+	 */
+	clear_tasks_mm_cpumask(cpu);
+
+	return 0;
+}
+
+static DECLARE_COMPLETION(cpu_died);
+
+/*
+ * called on the thread which is asking for a CPU to be shutdown -
+ * waits until shutdown has completed, or it is timed out.
+ */
+void __cpu_die(unsigned int cpu)
+{
+	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
+		pr_crit("CPU%u: cpu didn't die\n", cpu);
+		return;
+	}
+	pr_notice("CPU%u: shutdown\n", cpu);
+}
+
+/*
+ * Called from the idle thread for the CPU which has been shutdown.
+ *
+ * Note that we disable IRQs here, but do not re-enable them
+ * before returning to the caller. This is also the behaviour
+ * of the other hotplug-cpu capable cores, so presumably coming
+ * out of idle fixes this.
+ */
+void cpu_die(void)
+{
+	unsigned int cpu = smp_processor_id();
+
+	idle_task_exit();
+
+	local_irq_disable();
+
+	/* Tell __cpu_die() that this CPU is now safe to dispose of */
+	complete(&cpu_died);
+
+	/*
+	 * Actually shutdown the CPU. This must never fail. The specific hotplug
+	 * mechanism must perform all required cache maintenance to ensure that
+	 * no dirty lines are lost in the process of shutting down the CPU.
+	 */
+	cpu_ops[cpu]->cpu_die(cpu);
+
+	BUG();
+}
+#endif
+
 void __init smp_cpus_done(unsigned int max_cpus)
 {
 	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
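__cpu_die() and cpu_die() hand off through a completion: the requesting CPU blocks for up to five seconds while the dying CPU signals just before invoking the mechanism's cpu_die hook. The same handshake can be sketched in userspace with POSIX threads; everything below (the names, the five-second timeout) illustrates the pattern, not the kernel's implementation:

#include <pthread.h>
#include <stdio.h>
#include <time.h>

/* Userspace analogue of DECLARE_COMPLETION(cpu_died), complete() and
 * wait_for_completion_timeout(), built from a mutex and condvar.
 */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int died;

static void complete_died(void)
{
	pthread_mutex_lock(&lock);
	died = 1;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

static int wait_died_timeout(int secs)
{
	struct timespec ts;
	int err = 0;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += secs;
	pthread_mutex_lock(&lock);
	while (!died && err == 0)
		err = pthread_cond_timedwait(&cond, &lock, &ts);
	pthread_mutex_unlock(&lock);
	return died;	/* nonzero: signalled before the timeout */
}

static void *dying_cpu(void *unused)
{
	/* ...the dying CPU's teardown would run here... */
	complete_died();	/* "safe to dispose of" */
	return NULL;		/* stands in for the never-returning cpu_die() */
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, dying_cpu, NULL);
	if (!wait_died_timeout(5))
		fprintf(stderr, "cpu didn't die\n");
	else
		puts("shutdown");
	pthread_join(&t, NULL);
	return 0;
}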