/*
 * SMP support for PowerNV machines.
 *
 * Copyright 2011 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>

#include <asm/irq.h>
#include <asm/smp.h>
#include <asm/paca.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/vdso_datapage.h>
#include <asm/cputhreads.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/opal.h>
#include <asm/runlatch.h>
#include <asm/code-patching.h>
#include <asm/dbell.h>
#include <asm/kvm_ppc.h>
#include <asm/ppc-opcode.h>
#include <asm/cpuidle.h>
#include <asm/kexec.h>
#include <asm/reg.h>

#include "powernv.h"

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

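/*
 * smp_ops->setup_cpu hook: apply per-CPU workarounds and set up the
 * interrupt controller (XIVE or XICS) on the CPU that is coming up.
 */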
static void pnv_smp_setup_cpu(int cpu)
{
	/*
	 * P9 workaround for CI vector load (see traps.c),
	 * enable the corresponding HMI interrupt
	 */
	if (pvr_version_is(PVR_POWER9))
		mtspr(SPRN_HMEER, mfspr(SPRN_HMEER) | PPC_BIT(17));

	if (xive_enabled())
		xive_smp_setup_cpu();
	else if (cpu != boot_cpuid)
		xics_setup_cpu();
}

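/*
 * smp_ops->kick_cpu hook: if the target thread is still held in OPAL,
 * ask firmware to start it at generic_secondary_smp_init; in all cases
 * the final kick is the generic PACA cpu_start flag.
 */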
static int pnv_smp_kick_cpu(int nr)
{
	unsigned int pcpu;
	unsigned long start_here =
			__pa(ppc_function_entry(generic_secondary_smp_init));
	long rc;
	uint8_t status;

	if (nr < 0 || nr >= nr_cpu_ids)
		return -EINVAL;

	pcpu = get_hard_smp_processor_id(nr);
	/*
	 * If we already started or OPAL is not supported, we just
	 * kick the CPU via the PACA
	 */
	if (paca_ptrs[nr]->cpu_start || !firmware_has_feature(FW_FEATURE_OPAL))
		goto kick;

	/*
	 * At this point, the CPU can either be spinning on the way in
	 * from kexec or be inside OPAL waiting to be started for the
	 * first time. OPAL v3 allows us to query OPAL to know if it
	 * has the CPUs, so we do that
	 */
	rc = opal_query_cpu_status(pcpu, &status);
	if (rc != OPAL_SUCCESS) {
		pr_warn("OPAL Error %ld querying CPU %d state\n", rc, nr);
		return -ENODEV;
	}

	/*
	 * Already started, just kick it, probably coming from
	 * kexec and spinning
	 */
	if (status == OPAL_THREAD_STARTED)
		goto kick;

	/*
	 * Available/inactive, let's kick it
	 */
	if (status == OPAL_THREAD_INACTIVE) {
		pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n", nr, pcpu);
		rc = opal_start_cpu(pcpu, start_here);
		if (rc != OPAL_SUCCESS) {
			pr_warn("OPAL Error %ld starting CPU %d\n", rc, nr);
			return -ENODEV;
		}
	} else {
		/*
		 * An unavailable CPU (or any other unknown status)
		 * shouldn't be started. It should also
		 * not be in the possible map but currently it can
		 * happen
		 */
		pr_devel("OPAL: CPU %d (HW 0x%x) is unavailable"
			 " (status %d)...\n", nr, pcpu, status);
		return -ENODEV;
	}

kick:
	return smp_generic_kick_cpu(nr);
}

#ifdef CONFIG_HOTPLUG_CPU

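/*
 * Called on the CPU being unplugged: drop it from the online mask and
 * steer its interrupts to the remaining CPUs.
 */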
static int pnv_smp_cpu_disable(void)
{
	int cpu = smp_processor_id();

	/* This is identical to pSeries... might consolidate by
	 * moving migrate_irqs_away to a ppc_md with default to
	 * the generic fixup_irqs. --BenH.
	 */
	set_cpu_online(cpu, false);
	vdso_data->processorCount--;
	if (cpu == boot_cpuid)
		boot_cpuid = cpumask_any(cpu_online_mask);
	if (xive_enabled())
		xive_smp_disable_cpu();
	else
		xics_migrate_irqs_away();
	return 0;
}

static void pnv_smp_cpu_kill_self(void)
{
	unsigned int cpu;
	unsigned long srr1, wmask;

	/* Standard hot unplug procedure */
	/*
	 * This hard disables local interrupts, ensuring we have no lazy
	 * irqs pending.
	 */
	WARN_ON(irqs_disabled());
	hard_irq_disable();
	WARN_ON(lazy_irq_pending());

	idle_task_exit();
	current->active_mm = NULL; /* for sanity */
	cpu = smp_processor_id();
	DBG("CPU%d offline\n", cpu);
	generic_set_cpu_dead(cpu);
	smp_wmb();

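	/*
	 * SRR1 encodes the wake-up reason; POWER8 and later use a wider
	 * field, so pick the mask that matches this CPU family.
	 */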
	wmask = SRR1_WAKEMASK;
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		wmask = SRR1_WAKEMASK_P8;

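	/*
	 * Sit in a low-power idle state, handling stray wake-ups, until
	 * generic_check_cpu_restart() says this CPU should come back.
	 */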
	while (!generic_check_cpu_restart(cpu)) {
		/*
		 * Clear IPI flag, since we don't handle IPIs while
		 * offline, except for those when changing micro-threading
		 * mode, which are handled explicitly below, and those
		 * for coming online, which are handled via
		 * generic_check_cpu_restart() calls.
		 */
		kvmppc_set_host_ipi(cpu, 0);

		srr1 = pnv_cpu_offline(cpu);

		WARN_ON(lazy_irq_pending());

		/*
		 * If the SRR1 value indicates that we woke up due to
		 * an external interrupt, then clear the interrupt.
		 * We clear the interrupt before checking for the
		 * reason, so as to avoid a race where we wake up for
		 * some other reason, find nothing and clear the interrupt
		 * just as some other cpu is sending us an interrupt.
		 * If we returned from power7_nap as a result of
		 * having finished executing in a KVM guest, then srr1
		 * contains 0.
		 */
		if (((srr1 & wmask) == SRR1_WAKEEE) ||
		    ((srr1 & wmask) == SRR1_WAKEHVI)) {
			if (cpu_has_feature(CPU_FTR_ARCH_300)) {
				if (xive_enabled())
					xive_flush_interrupt();
				else
					icp_opal_flush_interrupt();
			} else
				icp_native_flush_interrupt();
		} else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
			unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
			asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
		} else if ((srr1 & wmask) == SRR1_WAKERESET) {
			irq_set_pending_from_srr1(srr1);
			/* Does not return */
		}

		smp_mb();

		/*
		 * For kdump kernels, we process the ipi and jump to
		 * crash_ipi_callback
		 */
		if (kdump_in_progress()) {
			/*
			 * If we got to this point, we've not used
			 * NMI's, otherwise we would have gone
			 * via the SRR1_WAKERESET path. We are
			 * using regular IPI's for waking up offline
			 * threads.
			 */
			struct pt_regs regs;

			ppc_save_regs(&regs);
			crash_ipi_callback(&regs);
			/* Does not return */
		}

		if (cpu_core_split_required())
			continue;

		if (srr1 && !generic_check_cpu_restart(cpu))
			DBG("CPU%d Unexpected exit while offline srr1=%lx!\n",
					cpu, srr1);
	}

	DBG("CPU%d coming online...\n", cpu);
}

#endif /* CONFIG_HOTPLUG_CPU */

static int pnv_cpu_bootable(unsigned int nr)
{
	/*
	 * Starting with POWER8, the subcore logic relies on all threads of a
	 * core being booted so that they can participate in split mode
	 * switches. So on those machines we ignore the smt_enabled_at_boot
	 * setting (smt-enabled on the kernel command line).
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		return 1;

	return smp_generic_cpu_bootable(nr);
}

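/*
 * XIVE needs its per-CPU queues set up before the CPU is started;
 * XICS has nothing to prepare.
 */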
static int pnv_smp_prepare_cpu(int cpu)
{
	if (xive_enabled())
		return xive_smp_prepare_cpu(cpu);
	return 0;
}

/* Cause IPI as setup by the interrupt controller (xics or xive) */
static void (*ic_cause_ipi)(int cpu);

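/*
 * Prefer a msgsnd doorbell for IPIs within the core; anything further
 * away goes through the underlying interrupt controller.
 */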
static void pnv_cause_ipi(int cpu)
{
	if (doorbell_try_core_ipi(cpu))
		return;

	ic_cause_ipi(cpu);
}

static void pnv_p9_dd1_cause_ipi(int cpu)
{
	int this_cpu = get_cpu();

	/*
	 * POWER9 DD1 has a global addressed msgsnd, but for now we restrict
	 * IPIs to same core, because it requires additional synchronization
	 * for inter-core doorbells which we do not implement.
	 */
	if (cpumask_test_cpu(cpu, cpu_sibling_mask(this_cpu)))
		doorbell_global_ipi(cpu);
	else
		ic_cause_ipi(cpu);

	put_cpu();
}

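/*
 * Probe the interrupt controller and, where the hardware has doorbell
 * support, layer the doorbell fast path over its cause_ipi.
 */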
static void __init pnv_smp_probe(void)
{
	if (xive_enabled())
		xive_smp_probe();
	else
		xics_smp_probe();

	if (cpu_has_feature(CPU_FTR_DBELL)) {
		ic_cause_ipi = smp_ops->cause_ipi;
		WARN_ON(!ic_cause_ipi);

		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
			if (cpu_has_feature(CPU_FTR_POWER9_DD1))
				smp_ops->cause_ipi = pnv_p9_dd1_cause_ipi;
			else
				smp_ops->cause_ipi = doorbell_global_ipi;
		} else {
			smp_ops->cause_ipi = pnv_cause_ipi;
		}
	}
}

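/*
 * NMI IPIs are delivered as system resets through OPAL; the exception
 * handler below feeds them into the generic NMI IPI code.
 */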
static int pnv_system_reset_exception(struct pt_regs *regs)
{
	if (smp_handle_nmi_ipi(regs))
		return 1;
	return 0;
}

static int pnv_cause_nmi_ipi(int cpu)
{
	int64_t rc;

	if (cpu >= 0) {
		rc = opal_signal_system_reset(get_hard_smp_processor_id(cpu));
		if (rc != OPAL_SUCCESS)
			return 0;
		return 1;

	} else if (cpu == NMI_IPI_ALL_OTHERS) {
		bool success = true;
		int c;

		/*
		 * We do not use broadcasts (yet), because it's not clear
		 * exactly what semantics Linux wants or the firmware should
		 * provide.
		 */
		for_each_online_cpu(c) {
			if (c == smp_processor_id())
				continue;

			rc = opal_signal_system_reset(
						get_hard_smp_processor_id(c));
			if (rc != OPAL_SUCCESS)
				success = false;
		}
		if (success)
			return 1;

		/*
		 * Caller will fall back to doorbells, which may pick
		 * up the remainders.
		 */
	}

	return 0;
}

static struct smp_ops_t pnv_smp_ops = {
	.message_pass	= NULL, /* Use smp_muxed_ipi_message_pass */
	.cause_ipi	= NULL,	/* Filled at runtime by pnv_smp_probe() */
	.cause_nmi_ipi	= NULL,
	.probe		= pnv_smp_probe,
	.prepare_cpu	= pnv_smp_prepare_cpu,
	.kick_cpu	= pnv_smp_kick_cpu,
	.setup_cpu	= pnv_smp_setup_cpu,
	.cpu_bootable	= pnv_cpu_bootable,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable	= pnv_smp_cpu_disable,
	.cpu_die	= generic_cpu_die,
#endif /* CONFIG_HOTPLUG_CPU */
};

/* This is called very early during platform setup_arch */
void __init pnv_smp_init(void)
{
	if (opal_check_token(OPAL_SIGNAL_SYSTEM_RESET)) {
		ppc_md.system_reset_exception = pnv_system_reset_exception;
		pnv_smp_ops.cause_nmi_ipi = pnv_cause_nmi_ipi;
	}
	smp_ops = &pnv_smp_ops;

#ifdef CONFIG_HOTPLUG_CPU
	ppc_md.cpu_die	= pnv_smp_cpu_kill_self;
#ifdef CONFIG_KEXEC_CORE
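	/*
	 * Offline secondaries wait in pnv_cpu_offline(); have the crash
	 * path wake them with an IPI so they can save their registers.
	 */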
	crash_wake_offline = 1;
#endif
#endif
}