smp.c
/*
 * SMP support for PowerNV machines.
 *
 * Copyright 2011 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>

#include <asm/irq.h>
#include <asm/smp.h>
#include <asm/paca.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/vdso_datapage.h>
#include <asm/cputhreads.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/opal.h>
#include <asm/runlatch.h>
#include <asm/code-patching.h>
#include <asm/dbell.h>
#include <asm/kvm_ppc.h>
#include <asm/ppc-opcode.h>
#include <asm/cpuidle.h>

#include "powernv.h"

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

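/*
 * Per-CPU interrupt controller setup: XIVE where it is enabled, otherwise
 * XICS (the boot CPU's XICS state is already set up earlier in boot).
 */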
static void pnv_smp_setup_cpu(int cpu)
{
	if (xive_enabled())
		xive_smp_setup_cpu();
	else if (cpu != boot_cpuid)
		xics_setup_cpu();
}

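/*
 * Start secondary thread 'nr'. The thread is either already spinning in
 * the kernel (e.g. coming in from kexec), in which case setting its PACA
 * cpu_start flag is enough, or it is still held in OPAL and must first be
 * released with opal_start_cpu().
 */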
static int pnv_smp_kick_cpu(int nr)
{
	unsigned int pcpu = get_hard_smp_processor_id(nr);
	unsigned long start_here =
			__pa(ppc_function_entry(generic_secondary_smp_init));
	long rc;
	uint8_t status;

	BUG_ON(nr < 0 || nr >= NR_CPUS);

	/*
	 * If we already started or OPAL is not supported, we just
	 * kick the CPU via the PACA
	 */
	if (paca[nr].cpu_start || !firmware_has_feature(FW_FEATURE_OPAL))
		goto kick;

	/*
	 * At this point, the CPU can either be spinning on the way in
	 * from kexec or be inside OPAL waiting to be started for the
	 * first time. OPAL v3 allows us to query OPAL to know if it
	 * has the CPUs, so we do that
	 */
	rc = opal_query_cpu_status(pcpu, &status);
	if (rc != OPAL_SUCCESS) {
		pr_warn("OPAL Error %ld querying CPU %d state\n", rc, nr);
		return -ENODEV;
	}

	/*
	 * Already started, just kick it, probably coming from
	 * kexec and spinning
	 */
	if (status == OPAL_THREAD_STARTED)
		goto kick;

	/*
	 * Available/inactive, let's kick it
	 */
	if (status == OPAL_THREAD_INACTIVE) {
		pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n", nr, pcpu);
		rc = opal_start_cpu(pcpu, start_here);
		if (rc != OPAL_SUCCESS) {
			pr_warn("OPAL Error %ld starting CPU %d\n", rc, nr);
			return -ENODEV;
		}
	} else {
		/*
		 * An unavailable CPU (or any other unknown status)
		 * shouldn't be started. It also shouldn't be in the
		 * possible map, but currently that can happen.
		 */
		pr_devel("OPAL: CPU %d (HW 0x%x) is unavailable"
			 " (status %d)...\n", nr, pcpu, status);
		return -ENODEV;
	}

kick:
	return smp_generic_kick_cpu(nr);
}

#ifdef CONFIG_HOTPLUG_CPU

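/*
 * Called on the CPU that is going offline: take it out of the online map
 * and migrate its interrupts away before it is torn down.
 */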
static int pnv_smp_cpu_disable(void)
{
	int cpu = smp_processor_id();

	/* This is identical to pSeries... might consolidate by
	 * moving migrate_irqs_away to a ppc_md with default to
	 * the generic fixup_irqs. --BenH.
	 */
	set_cpu_online(cpu, false);
	vdso_data->processorCount--;
	if (cpu == boot_cpuid)
		boot_cpuid = cpumask_any(cpu_online_mask);
	if (xive_enabled())
		xive_smp_disable_cpu();
	else
		xics_migrate_irqs_away();
	return 0;
}

static void pnv_smp_cpu_kill_self(void)
{
	unsigned int cpu;
	unsigned long srr1, wmask;

	/* Standard hot unplug procedure */
	/*
	 * This hard disables local interrupts, ensuring we have no lazy
	 * irqs pending.
	 */
	WARN_ON(irqs_disabled());
	hard_irq_disable();
	WARN_ON(lazy_irq_pending());

	idle_task_exit();
	current->active_mm = NULL; /* for sanity */
	cpu = smp_processor_id();
	DBG("CPU%d offline\n", cpu);
	generic_set_cpu_dead(cpu);
	smp_wmb();

	wmask = SRR1_WAKEMASK;
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		wmask = SRR1_WAKEMASK_P8;

	/* We don't want to take decrementer interrupts while we are offline,
	 * so clear LPCR:PECE1. We keep PECE2 (and LPCR_PECE_HVEE on P9)
	 * enabled so as to let IPIs in.
	 */
	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);

	while (!generic_check_cpu_restart(cpu)) {
		/*
		 * Clear IPI flag, since we don't handle IPIs while
		 * offline, except for those when changing micro-threading
		 * mode, which are handled explicitly below, and those
		 * for coming online, which are handled via
		 * generic_check_cpu_restart() calls.
		 */
		kvmppc_set_host_ipi(cpu, 0);

		srr1 = pnv_cpu_offline(cpu);

		WARN_ON(lazy_irq_pending());

		/*
		 * If the SRR1 value indicates that we woke up due to
		 * an external interrupt, then clear the interrupt.
		 * We clear the interrupt before checking for the
		 * reason, so as to avoid a race where we wake up for
		 * some other reason, find nothing and clear the interrupt
		 * just as some other cpu is sending us an interrupt.
		 * If we returned from power7_nap as a result of
		 * having finished executing in a KVM guest, then srr1
		 * contains 0.
		 */
		if (((srr1 & wmask) == SRR1_WAKEEE) ||
		    ((srr1 & wmask) == SRR1_WAKEHVI)) {
			if (cpu_has_feature(CPU_FTR_ARCH_300)) {
				if (xive_enabled())
					xive_flush_interrupt();
				else
					icp_opal_flush_interrupt();
			} else
				icp_native_flush_interrupt();
		} else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
			unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
			asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
		}
		smp_mb();

		if (cpu_core_split_required())
			continue;

		if (srr1 && !generic_check_cpu_restart(cpu))
			DBG("CPU%d Unexpected exit while offline srr1=%lx!\n",
					cpu, srr1);

	}

	/* Re-enable decrementer interrupts */
	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);
	DBG("CPU%d coming online...\n", cpu);
}

#endif /* CONFIG_HOTPLUG_CPU */

static int pnv_cpu_bootable(unsigned int nr)
{
	/*
	 * Starting with POWER8, the subcore logic relies on all threads of a
	 * core being booted so that they can participate in split mode
	 * switches. So on those machines we ignore the smt_enabled_at_boot
	 * setting (smt-enabled on the kernel command line).
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		return 1;

	return smp_generic_cpu_bootable(nr);
}

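/*
 * Give XIVE a chance to set up its per-CPU state before the CPU is
 * started; XICS needs nothing at this stage.
 */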
static int pnv_smp_prepare_cpu(int cpu)
{
	if (xive_enabled())
		return xive_smp_prepare_cpu(cpu);
	return 0;
}

/* Cause IPI as set up by the interrupt controller (xics or xive) */
static void (*ic_cause_ipi)(int cpu);

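/*
 * Send an IPI: try a cheap doorbell to a thread on the same core first,
 * and fall back to the interrupt controller for remote cores.
 */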
static void pnv_cause_ipi(int cpu)
{
	if (doorbell_try_core_ipi(cpu))
		return;

	ic_cause_ipi(cpu);
}

static void pnv_p9_dd1_cause_ipi(int cpu)
{
	int this_cpu = get_cpu();

	/*
	 * POWER9 DD1 has a global addressed msgsnd, but for now we restrict
	 * IPIs to same core, because it requires additional synchronization
	 * for inter-core doorbells which we do not implement.
	 */
	if (cpumask_test_cpu(cpu, cpu_sibling_mask(this_cpu)))
		doorbell_global_ipi(cpu);
	else
		ic_cause_ipi(cpu);

	put_cpu();
}

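/*
 * Probe the interrupt controller and, where the CPU supports doorbell
 * messaging, route IPIs through doorbells, saving the controller's own
 * cause_ipi in ic_cause_ipi for the paths that still need it.
 */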
static void __init pnv_smp_probe(void)
{
	if (xive_enabled())
		xive_smp_probe();
	else
		xics_smp_probe();

	if (cpu_has_feature(CPU_FTR_DBELL)) {
		ic_cause_ipi = smp_ops->cause_ipi;
		WARN_ON(!ic_cause_ipi);

		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
			if (cpu_has_feature(CPU_FTR_POWER9_DD1))
				smp_ops->cause_ipi = pnv_p9_dd1_cause_ipi;
			else
				smp_ops->cause_ipi = doorbell_global_ipi;
		} else {
			smp_ops->cause_ipi = pnv_cause_ipi;
		}
	}
}

static struct smp_ops_t pnv_smp_ops = {
	.message_pass	= NULL, /* Use smp_muxed_ipi_message_pass */
	.cause_ipi	= NULL,	/* Filled at runtime by pnv_smp_probe() */
	.cause_nmi_ipi	= NULL,
	.probe		= pnv_smp_probe,
	.prepare_cpu	= pnv_smp_prepare_cpu,
	.kick_cpu	= pnv_smp_kick_cpu,
	.setup_cpu	= pnv_smp_setup_cpu,
	.cpu_bootable	= pnv_cpu_bootable,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable	= pnv_smp_cpu_disable,
	.cpu_die	= generic_cpu_die,
#endif /* CONFIG_HOTPLUG_CPU */
};

/* This is called very early during platform setup_arch */
void __init pnv_smp_init(void)
{
	smp_ops = &pnv_smp_ops;

#ifdef CONFIG_HOTPLUG_CPU
	ppc_md.cpu_die	= pnv_smp_cpu_kill_self;
#endif
}